// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"
#include "cqhci-crypto.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
        struct mmc_request *mrq;
        unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT  BIT(0)
#define CQHCI_COMPLETED         BIT(1)
#define CQHCI_HOST_CRC          BIT(2)
#define CQHCI_HOST_TIMEOUT      BIT(3)
#define CQHCI_HOST_OTHER        BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
        u8 *desc = get_desc(cq_host, tag);

        return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->trans_desc_dma_base +
                (cq_host->mmc->max_segs * tag *
                 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->trans_desc_base +
                (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

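/*
 * Initialise the link descriptor for @tag: zero it, then either mark it
 * end-of-chain (the DCMD slot carries no data) or point it at the slot's
 * transfer descriptor list, using a 32-bit or 64-bit address depending on
 * the DMA mode.
 */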
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
        u8 *link_temp;
        dma_addr_t trans_temp;

        link_temp = get_link_desc(cq_host, tag);
        trans_temp = get_trans_desc_dma(cq_host, tag);

        memset(link_temp, 0, cq_host->link_desc_len);
        if (cq_host->link_desc_len > 8)
                *(link_temp + 8) = 0;

        if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
                *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
                return;
        }

        *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

        if (cq_host->dma64) {
                __le64 *data_addr = (__le64 __force *)(link_temp + 4);

                data_addr[0] = cpu_to_le64(trans_temp);
        } else {
                __le32 *data_addr = (__le32 __force *)(link_temp + 4);

                data_addr[0] = cpu_to_le32(trans_temp);
        }
}

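/* Program the same mask into the interrupt status and signal enables */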
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
        cqhci_writel(cq_host, set, CQHCI_ISTE);
        cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
        pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
        struct mmc_host *mmc = cq_host->mmc;

        CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

        CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CAP),
                   cqhci_readl(cq_host, CQHCI_VER));
        CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CFG),
                   cqhci_readl(cq_host, CQHCI_CTL));
        CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_IS),
                   cqhci_readl(cq_host, CQHCI_ISTE));
        CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_ISGE),
                   cqhci_readl(cq_host, CQHCI_IC));
        CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TDLBA),
                   cqhci_readl(cq_host, CQHCI_TDLBAU));
        CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TDBR),
                   cqhci_readl(cq_host, CQHCI_TCN));
        CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_DQS),
                   cqhci_readl(cq_host, CQHCI_DPT));
        CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TCLR),
                   cqhci_readl(cq_host, CQHCI_SSC1));
        CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_SSC2),
                   cqhci_readl(cq_host, CQHCI_CRDCT));
        CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_RMEM),
                   cqhci_readl(cq_host, CQHCI_TERRI));
        CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CRI),
                   cqhci_readl(cq_host, CQHCI_CRA));

        if (cq_host->ops->dumpregs)
                cq_host->ops->dumpregs(mmc);
        else
                CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
        int i = 0;

        /* task descriptor can be 64/128 bit irrespective of arch */
        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
                cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
                             CQHCI_TASK_DESC_SZ, CQHCI_CFG);
                cq_host->task_desc_len = 16;
        } else {
                cq_host->task_desc_len = 8;
        }

        /*
         * 96 bits length of transfer desc instead of 128 bits which means
         * ADMA would expect next valid descriptor at the 96th bit
         * or 128th bit
         */
        if (cq_host->dma64) {
                if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
                        cq_host->trans_desc_len = 12;
                else
                        cq_host->trans_desc_len = 16;
                cq_host->link_desc_len = 16;
        } else {
                cq_host->trans_desc_len = 8;
                cq_host->link_desc_len = 8;
        }

        /* total size of a slot: 1 task & 1 transfer (link) */
        cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

        cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

        cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
                cq_host->mmc->cqe_qdepth;

        pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
                 cq_host->slot_sz);

        /*
         * allocate a dma-mapped chunk of memory for the descriptors
         * allocate a dma-mapped chunk of memory for link descriptors
         * setup each link-desc memory offset per slot-number to
         * the descriptor table.
         */
        cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                                 cq_host->desc_size,
                                                 &cq_host->desc_dma_base,
                                                 GFP_KERNEL);
        if (!cq_host->desc_base)
                return -ENOMEM;

        cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                                       cq_host->data_size,
                                                       &cq_host->trans_desc_dma_base,
                                                       GFP_KERNEL);
        if (!cq_host->trans_desc_base) {
                dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
                                   cq_host->desc_base,
                                   cq_host->desc_dma_base);
                cq_host->desc_base = NULL;
                cq_host->desc_dma_base = 0;
                return -ENOMEM;
        }

        pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
                 (unsigned long long)cq_host->desc_dma_base,
                 (unsigned long long)cq_host->trans_desc_dma_base);

        for (; i < (cq_host->num_slots); i++)
                setup_trans_desc(cq_host, i);

        return 0;
}

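/*
 * Program the controller configuration (DCMD, task descriptor size,
 * crypto), the task descriptor list base address and the card's RCA, then
 * set the enable bit and clear any halt state. Interrupts are re-enabled
 * only after all other writes are done.
 */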
static void __cqhci_enable(struct cqhci_host *cq_host)
{
        struct mmc_host *mmc = cq_host->mmc;
        u32 cqcfg;

        cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

        /* Configuration must not be changed while enabled */
        if (cqcfg & CQHCI_ENABLE) {
                cqcfg &= ~CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
        }

        cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

        if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
                cqcfg |= CQHCI_DCMD;

        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
                cqcfg |= CQHCI_TASK_DESC_SZ;

        if (mmc->caps2 & MMC_CAP2_CRYPTO)
                cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE;

        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
                     CQHCI_TDLBA);
        cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
                     CQHCI_TDLBAU);

        cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

        cqhci_set_irqs(cq_host, 0);

        cqcfg |= CQHCI_ENABLE;

        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
                cqhci_writel(cq_host, 0, CQHCI_CTL);

        mmc->cqe_on = true;

        if (cq_host->ops->enable)
                cq_host->ops->enable(mmc);

        /* Ensure all writes are done before interrupts are enabled */
        wmb();

        cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

        cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
        u32 cqcfg;

        cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
        cqcfg &= ~CQHCI_ENABLE;
        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        cq_host->mmc->cqe_on = false;

        cq_host->activated = false;
}

int cqhci_deactivate(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (cq_host->enabled && cq_host->activated)
                __cqhci_disable(cq_host);

        return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
        /* Re-enable is done upon first request */
        return 0;
}
EXPORT_SYMBOL(cqhci_resume);

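/*
 * mmc_cqe_ops ->cqe_enable: allocate the descriptor lists on first use and
 * activate the controller. Command queueing must already be enabled on the
 * card.
 */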
static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int err;

        if (!card->ext_csd.cmdq_en)
                return -EINVAL;

        if (cq_host->enabled)
                return 0;

        cq_host->rca = card->rca;

        err = cqhci_host_alloc_tdl(cq_host);
        if (err) {
                pr_err("%s: Failed to enable CQE, error %d\n",
                       mmc_hostname(mmc), err);
                return err;
        }

        __cqhci_enable(cq_host);

        cq_host->enabled = true;

#ifdef DEBUG
        cqhci_dumpregs(cq_host);
#endif
        return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
        return cqhci_readl(cq_host, CQHCI_CTL);
}

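/*
 * Halt the controller so the host can issue non-CQE commands. Failure to
 * halt within CQHCI_OFF_TIMEOUT microseconds is logged but not treated as
 * fatal.
 */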
static void cqhci_off(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        u32 reg;
        int err;

        if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
                return;

        if (cq_host->ops->disable)
                cq_host->ops->disable(mmc, false);

        cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

        err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
                                 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
        if (err < 0)
                pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
        else
                pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

        if (cq_host->ops->post_disable)
                cq_host->ops->post_disable(mmc);

        mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (!cq_host->enabled)
                return;

        cqhci_off(mmc);

        __cqhci_disable(cq_host);

        dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
                           cq_host->trans_desc_base,
                           cq_host->trans_desc_dma_base);

        dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
                           cq_host->desc_base,
                           cq_host->desc_dma_base);

        cq_host->trans_desc_base = NULL;
        cq_host->desc_base = NULL;

        cq_host->enabled = false;
}

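/*
 * Build the task descriptor for a data request: attribute, block count and
 * block address fields in the first 64 bits, plus the crypto fields in the
 * upper 64 bits when 128-bit task descriptors are in use.
 */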
static void cqhci_prep_task_desc(struct mmc_request *mrq,
                                 struct cqhci_host *cq_host, int tag)
{
        __le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
        u32 req_flags = mrq->data->flags;
        u64 desc0;

        desc0 = CQHCI_VALID(1) |
                CQHCI_END(1) |
                CQHCI_INT(1) |
                CQHCI_ACT(0x5) |
                CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
                CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
                CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
                CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
                CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
                CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
                CQHCI_BLK_COUNT(mrq->data->blocks) |
                CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

        task_desc[0] = cpu_to_le64(desc0);

        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
                u64 desc1 = cqhci_crypto_prep_task_desc(mrq);

                task_desc[1] = cpu_to_le64(desc1);

                pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
                         mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
        } else {
                pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
                         mmc_hostname(mrq->host), mrq->tag, desc0);
        }
}

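/* Map the request's scatterlist for DMA; returns the mapped segment count */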
static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
        int sg_count;
        struct mmc_data *data = mrq->data;

        if (!data)
                return -EINVAL;

        sg_count = dma_map_sg(mmc_dev(host), data->sg,
                              data->sg_len,
                              (data->flags & MMC_DATA_WRITE) ?
                              DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (!sg_count) {
                pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
                return -ENOMEM;
        }

        return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
                                bool dma64)
{
        __le32 *attr = (__le32 __force *)desc;

        *attr = (CQHCI_VALID(1) |
                 CQHCI_END(end ? 1 : 0) |
                 CQHCI_INT(0) |
                 CQHCI_ACT(0x4) |
                 CQHCI_DAT_LENGTH(len));

        if (dma64) {
                __le64 *dataddr = (__le64 __force *)(desc + 4);

                dataddr[0] = cpu_to_le64(addr);
        } else {
                __le32 *dataddr = (__le32 __force *)(desc + 4);

                dataddr[0] = cpu_to_le32(addr);
        }
}

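/*
 * Build the transfer descriptor chain for @tag, one descriptor per mapped
 * scatterlist segment, with the last descriptor marked as the end.
 */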
static int cqhci_prep_tran_desc(struct mmc_request *mrq,
                                struct cqhci_host *cq_host, int tag)
{
        struct mmc_data *data = mrq->data;
        int i, sg_count, len;
        bool end = false;
        bool dma64 = cq_host->dma64;
        dma_addr_t addr;
        u8 *desc;
        struct scatterlist *sg;

        sg_count = cqhci_dma_map(mrq->host, mrq);
        if (sg_count < 0) {
                pr_err("%s: %s: unable to map sg lists, %d\n",
                       mmc_hostname(mrq->host), __func__, sg_count);
                return sg_count;
        }

        desc = get_trans_desc(cq_host, tag);

        for_each_sg(data->sg, sg, sg_count, i) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);

                if ((i+1) == sg_count)
                        end = true;
                cqhci_set_tran_desc(desc, addr, len, end, dma64);
                desc += cq_host->trans_desc_len;
        }

        return 0;
}

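/*
 * Build the direct-command (DCMD) task descriptor in its dedicated slot.
 * QBAR is set so the command acts as a queue barrier, and the timing and
 * response-type fields are derived from the command's response flags.
 */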
static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
                                 struct mmc_request *mrq)
{
        u64 *task_desc = NULL;
        u64 data = 0;
        u8 resp_type;
        u8 *desc;
        __le64 *dataddr;
        struct cqhci_host *cq_host = mmc->cqe_private;
        u8 timing;

        if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
                resp_type = 0x0;
                timing = 0x1;
        } else {
                if (mrq->cmd->flags & MMC_RSP_R1B) {
                        resp_type = 0x3;
                        timing = 0x0;
                } else {
                        resp_type = 0x2;
                        timing = 0x1;
                }
        }

        task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
        memset(task_desc, 0, cq_host->task_desc_len);
        data |= (CQHCI_VALID(1) |
                 CQHCI_END(1) |
                 CQHCI_INT(1) |
                 CQHCI_QBAR(1) |
                 CQHCI_ACT(0x5) |
                 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
                 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
        if (cq_host->ops->update_dcmd_desc)
                cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
        *task_desc |= data;
        desc = (u8 *)task_desc;
        pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
                 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
        dataddr = (__le64 __force *)(desc + 4);
        dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;

        if (data) {
                dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
                             (data->flags & MMC_DATA_READ) ?
                             DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
        return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

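/*
 * mmc_cqe_ops ->cqe_request: prepare the task (and transfer) descriptors
 * for @mrq and ring the doorbell for its tag. Re-enables the CQE on the
 * first request after resume and rejects requests during recovery.
 */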
static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        int err = 0;
        int tag = cqhci_tag(mrq);
        struct cqhci_host *cq_host = mmc->cqe_private;
        unsigned long flags;

        if (!cq_host->enabled) {
                pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
                return -EINVAL;
        }

        /* First request after resume has to re-enable */
        if (!cq_host->activated)
                __cqhci_enable(cq_host);

        if (!mmc->cqe_on) {
                if (cq_host->ops->pre_enable)
                        cq_host->ops->pre_enable(mmc);

                cqhci_writel(cq_host, 0, CQHCI_CTL);
                mmc->cqe_on = true;
                pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
                if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
                        pr_err("%s: cqhci: CQE failed to exit halt state\n",
                               mmc_hostname(mmc));
                }
                if (cq_host->ops->enable)
                        cq_host->ops->enable(mmc);
        }

        if (mrq->data) {
                cqhci_prep_task_desc(mrq, cq_host, tag);

                err = cqhci_prep_tran_desc(mrq, cq_host, tag);
                if (err) {
                        pr_err("%s: cqhci: failed to setup tx desc: %d\n",
                               mmc_hostname(mmc), err);
                        return err;
                }
        } else {
                cqhci_prep_dcmd_desc(mmc, mrq);
        }

        spin_lock_irqsave(&cq_host->lock, flags);

        if (cq_host->recovery_halt) {
                err = -EBUSY;
                goto out_unlock;
        }

        cq_host->slot[tag].mrq = mrq;
        cq_host->slot[tag].flags = 0;

        cq_host->qcnt += 1;
        /* Make sure descriptors are ready before ringing the doorbell */
        wmb();
        cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
        if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
                pr_debug("%s: cqhci: doorbell not set for tag %d\n",
                         mmc_hostname(mmc), tag);
out_unlock:
        spin_unlock_irqrestore(&cq_host->lock, flags);

        if (err)
                cqhci_post_req(mmc, mrq);

        return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
                                  bool notify)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (!cq_host->recovery_halt) {
                cq_host->recovery_halt = true;
                pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
                wake_up(&cq_host->wait_queue);
                if (notify && mrq->recovery_notifier)
                        mrq->recovery_notifier(mrq);
        }
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
        int error = error1 ? error1 : error2;

        switch (error) {
        case -EILSEQ:
                return CQHCI_HOST_CRC;
        case -ETIMEDOUT:
                return CQHCI_HOST_TIMEOUT;
        default:
                return CQHCI_HOST_OTHER;
        }
}

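/*
 * Handle error interrupts: use TERRI to identify the failing command or
 * data task, flag the affected slots and trigger recovery. If no task is
 * indicated, one queued task is marked anyway so recovery can make forward
 * progress.
 */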
static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
                            int data_error)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        struct cqhci_slot *slot;
        u32 terri;
        u32 tdpe;
        int tag;

        spin_lock(&cq_host->lock);

        terri = cqhci_readl(cq_host, CQHCI_TERRI);

        pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
                 mmc_hostname(mmc), status, cmd_error, data_error, terri);

        /* Forget about errors when recovery has already been triggered */
        if (cq_host->recovery_halt)
                goto out_unlock;

        if (!cq_host->qcnt) {
                WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
                          mmc_hostname(mmc), status, cmd_error, data_error,
                          terri);
                goto out_unlock;
        }

        if (CQHCI_TERRI_C_VALID(terri)) {
                tag = CQHCI_TERRI_C_TASK(terri);
                slot = &cq_host->slot[tag];
                if (slot->mrq) {
                        slot->flags = cqhci_error_flags(cmd_error, data_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        if (CQHCI_TERRI_D_VALID(terri)) {
                tag = CQHCI_TERRI_D_TASK(terri);
                slot = &cq_host->slot[tag];
                if (slot->mrq) {
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        /*
         * Handle ICCE ("Invalid Crypto Configuration Error"). This should
         * never happen, since the block layer ensures that all crypto-enabled
         * I/O requests have a valid keyslot before they reach the driver.
         *
         * Note that GCE ("General Crypto Error") is different; it already got
         * handled above by checking TERRI.
         */
        if (status & CQHCI_IS_ICCE) {
                tdpe = cqhci_readl(cq_host, CQHCI_TDPE);
                WARN_ONCE(1,
                          "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n",
                          mmc_hostname(mmc), status, tdpe);
                while (tdpe != 0) {
                        tag = __ffs(tdpe);
                        tdpe &= ~(1 << tag);
                        slot = &cq_host->slot[tag];
                        if (!slot->mrq)
                                continue;
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        if (!cq_host->recovery_halt) {
                /*
                 * The only way to guarantee forward progress is to mark at
                 * least one task in error, so if none is indicated, pick one.
                 */
                for (tag = 0; tag < NUM_SLOTS; tag++) {
                        slot = &cq_host->slot[tag];
                        if (!slot->mrq)
                                continue;
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                        break;
                }
        }

out_unlock:
        spin_unlock(&cq_host->lock);
}

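/* Complete the request occupying @tag, unless recovery is in progress */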
static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        struct cqhci_slot *slot = &cq_host->slot[tag];
        struct mmc_request *mrq = slot->mrq;
        struct mmc_data *data;

        if (!mrq) {
                WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
                          mmc_hostname(mmc), tag);
                return;
        }

        /* No completions allowed during recovery */
        if (cq_host->recovery_halt) {
                slot->flags |= CQHCI_COMPLETED;
                return;
        }

        slot->mrq = NULL;

        cq_host->qcnt -= 1;

        data = mrq->data;
        if (data) {
                if (data->error)
                        data->bytes_xfered = 0;
                else
                        data->bytes_xfered = data->blksz * data->blocks;
        }

        mmc_cqe_request_done(mmc, mrq);
}

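/*
 * Main interrupt handler, called by the glue driver's interrupt routine
 * with any controller-specific command/data errors. Task completions are
 * read from TCN and the corresponding requests completed here.
 */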
irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
                      int data_error)
{
        u32 status;
        unsigned long tag = 0, comp_status;
        struct cqhci_host *cq_host = mmc->cqe_private;

        status = cqhci_readl(cq_host, CQHCI_IS);
        cqhci_writel(cq_host, status, CQHCI_IS);

        pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

        if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
            cmd_error || data_error)
                cqhci_error_irq(mmc, status, cmd_error, data_error);

        if (status & CQHCI_IS_TCC) {
                /* read TCN and complete the request */
                comp_status = cqhci_readl(cq_host, CQHCI_TCN);
                cqhci_writel(cq_host, comp_status, CQHCI_TCN);
                pr_debug("%s: cqhci: TCN: 0x%08lx\n",
                         mmc_hostname(mmc), comp_status);

                spin_lock(&cq_host->lock);

                for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
                        /* complete the corresponding mrq */
                        pr_debug("%s: cqhci: completing tag %lu\n",
                                 mmc_hostname(mmc), tag);
                        cqhci_finish_mrq(mmc, tag);
                }

                if (cq_host->waiting_for_idle && !cq_host->qcnt) {
                        cq_host->waiting_for_idle = false;
                        wake_up(&cq_host->wait_queue);
                }

                spin_unlock(&cq_host->lock);
        }

        if (status & CQHCI_IS_TCL)
                wake_up(&cq_host->wait_queue);

        if (status & CQHCI_IS_HAC)
                wake_up(&cq_host->wait_queue);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
        unsigned long flags;
        bool is_idle;

        spin_lock_irqsave(&cq_host->lock, flags);
        is_idle = !cq_host->qcnt || cq_host->recovery_halt;
        *ret = cq_host->recovery_halt ? -EBUSY : 0;
        cq_host->waiting_for_idle = !is_idle;
        spin_unlock_irqrestore(&cq_host->lock, flags);

        return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int ret;

        wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

        return ret;
}

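/*
 * mmc_cqe_ops ->cqe_timeout: called when the block layer times out a
 * request. If the request is still queued, flag it and request recovery.
 */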
static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
                          bool *recovery_needed)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int tag = cqhci_tag(mrq);
        struct cqhci_slot *slot = &cq_host->slot[tag];
        unsigned long flags;
        bool timed_out;

        spin_lock_irqsave(&cq_host->lock, flags);
        timed_out = slot->mrq == mrq;
        if (timed_out) {
                slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
                cqhci_recovery_needed(mmc, mrq, false);
                *recovery_needed = cq_host->recovery_halt;
        }
        spin_unlock_irqrestore(&cq_host->lock, flags);

        if (timed_out) {
                pr_err("%s: cqhci: timeout for tag %d\n",
                       mmc_hostname(mmc), tag);
                cqhci_dumpregs(cq_host);
        }

        return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
        return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        bool ret;
        u32 ctl;

        cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

        ctl = cqhci_readl(cq_host, CQHCI_CTL);
        ctl |= CQHCI_CLEAR_ALL_TASKS;
        cqhci_writel(cq_host, ctl, CQHCI_CTL);

        wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
                           msecs_to_jiffies(timeout) + 1);

        cqhci_set_irqs(cq_host, 0);

        ret = cqhci_tasks_cleared(cq_host);

        if (!ret)
                pr_debug("%s: cqhci: Failed to clear tasks\n",
                         mmc_hostname(mmc));

        return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
        return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

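/*
 * Set the HALT bit and wait up to @timeout milliseconds for the controller
 * to signal the halt via the HAC interrupt. Returns true if the controller
 * is halted.
 */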
static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        bool ret;
        u32 ctl;

        if (cqhci_halted(cq_host))
                return true;

        cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

        ctl = cqhci_readl(cq_host, CQHCI_CTL);
        ctl |= CQHCI_HALT;
        cqhci_writel(cq_host, ctl, CQHCI_CTL);

        wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
                           msecs_to_jiffies(timeout) + 1);

        cqhci_set_irqs(cq_host, 0);

        ret = cqhci_halted(cq_host);

        if (!ret)
                pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

        return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT 5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

        WARN_ON(!cq_host->recovery_halt);

        cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

        if (cq_host->ops->disable)
                cq_host->ops->disable(mmc, true);

        mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
        if (!flags)
                return 0;

        /* CRC errors might indicate re-tuning so prefer to report that */
        if (flags & CQHCI_HOST_CRC)
                return -EILSEQ;

        if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
                return -ETIMEDOUT;

        return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
        struct cqhci_slot *slot = &cq_host->slot[tag];
        struct mmc_request *mrq = slot->mrq;
        struct mmc_data *data;

        if (!mrq)
                return;

        slot->mrq = NULL;

        cq_host->qcnt -= 1;

        data = mrq->data;
        if (data) {
                data->bytes_xfered = 0;
                data->error = cqhci_error_from_flags(slot->flags);
        } else {
                mrq->cmd->error = cqhci_error_from_flags(slot->flags);
        }

        mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
        int i;

        for (i = 0; i < cq_host->num_slots; i++)
                cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT 20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT 20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        unsigned long flags;
        u32 cqcfg;
        bool ok;

        pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

        WARN_ON(!cq_host->recovery_halt);

        ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

        if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
                ok = false;

        /*
         * The specification contradicts itself: it says tasks cannot be
         * cleared if CQHCI does not halt, that CQHCI should be disabled and
         * re-enabled if it does not halt, but also that it must not be
         * disabled before the tasks are cleared. Have a go anyway.
         */
        if (!ok) {
                pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
                cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
                cqcfg &= ~CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
                cqcfg |= CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
                /* Be sure that there are no tasks */
                ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
                if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
                        ok = false;
                WARN_ON(!ok);
        }

        cqhci_recover_mrqs(cq_host);

        WARN_ON(cq_host->qcnt);

        spin_lock_irqsave(&cq_host->lock, flags);
        cq_host->qcnt = 0;
        cq_host->recovery_halt = false;
        mmc->cqe_on = false;
        spin_unlock_irqrestore(&cq_host->lock, flags);

        /* Ensure all writes are done before interrupts are re-enabled */
        wmb();

        cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

        cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

        pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
        .cqe_enable = cqhci_enable,
        .cqe_disable = cqhci_disable,
        .cqe_request = cqhci_request,
        .cqe_post_req = cqhci_post_req,
        .cqe_off = cqhci_off,
        .cqe_wait_for_idle = cqhci_wait_for_idle,
        .cqe_timeout = cqhci_timeout,
        .cqe_recovery_start = cqhci_recovery_start,
        .cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
        struct cqhci_host *cq_host;
        struct resource *cqhci_memres = NULL;

        /* check and setup CMDQ interface */
        cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                    "cqhci");
        if (!cqhci_memres) {
                dev_dbg(&pdev->dev, "CMDQ not supported\n");
                return ERR_PTR(-EINVAL);
        }

        cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
        if (!cq_host)
                return ERR_PTR(-ENOMEM);
        cq_host->mmio = devm_ioremap(&pdev->dev,
                                     cqhci_memres->start,
                                     resource_size(cqhci_memres));
        if (!cq_host->mmio) {
                dev_err(&pdev->dev, "failed to remap cqhci regs\n");
                return ERR_PTR(-EBUSY);
        }
        dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

        return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
        return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
        u32 ver = cqhci_readl(cq_host, CQHCI_VER);

        return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

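/*
 * Initialise a caller-allocated cqhci_host (e.g. from cqhci_pltfm_init)
 * and hook it up to @mmc as its command queue engine.
 */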
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
               bool dma64)
{
        int err;

        cq_host->dma64 = dma64;
        cq_host->mmc = mmc;
        cq_host->mmc->cqe_private = cq_host;

        cq_host->num_slots = NUM_SLOTS;
        cq_host->dcmd_slot = DCMD_SLOT;

        mmc->cqe_ops = &cqhci_cqe_ops;

        mmc->cqe_qdepth = NUM_SLOTS;
        if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
                mmc->cqe_qdepth -= 1;

        cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
                                     sizeof(*cq_host->slot), GFP_KERNEL);
        if (!cq_host->slot) {
                err = -ENOMEM;
                goto out_err;
        }

        err = cqhci_crypto_init(cq_host);
        if (err) {
                pr_err("%s: CQHCI crypto initialization failed\n",
                       mmc_hostname(mmc));
                goto out_err;
        }

        spin_lock_init(&cq_host->lock);

        init_completion(&cq_host->halt_comp);
        init_waitqueue_head(&cq_host->wait_queue);

        pr_info("%s: CQHCI version %u.%02u\n",
                mmc_hostname(mmc), cqhci_ver_major(cq_host),
                cqhci_ver_minor(cq_host));

        return 0;

out_err:
        pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
               mmc_hostname(mmc), cqhci_ver_major(cq_host),
               cqhci_ver_minor(cq_host), err);
        return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");