xref: /OK3568_Linux_fs/kernel/drivers/mmc/host/mmc_hsq.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

#define HSQ_NUM_SLOTS	64
#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS

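/*
 * Retry work: re-issue the current request from process context after
 * mmc_hsq_pump_requests() got -EBUSY from the host's request_atomic() hook.
 */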
static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}

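/*
 * Dispatch the request selected by hsq->next_tag to the host controller,
 * preferring the request_atomic() hook when the driver provides one. Does
 * nothing if a request is already in flight, the queue is empty or
 * disabled, or error recovery is in progress.
 */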
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request now */
	if (hsq->mrq || hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests that need to be pumped */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
	else
		mmc->ops->request(mmc, hsq->mrq);

	/*
	 * If request_atomic() returns BUSY, the card may be busy now, so we
	 * should switch to a non-atomic context to try again for this unusual
	 * case, to avoid time-consuming operations in the atomic context.
	 *
	 * Note: we just give a warning for other error cases, since the host
	 * driver will handle them.
	 */
	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);
	else
		WARN_ON_ONCE(ret);
}

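/*
 * Select the tag of the next request to dispatch: try the next consecutive
 * tag first, fall back to scanning all slots, and set HSQ_INVALID_TAG when
 * no request remains.
 */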
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	struct hsq_slot *slot;
	int tag;

	/*
	 * If there are no remaining requests in the software queue, then set
	 * an invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		return;
	}

	/*
	 * Increase the next tag and check if the corresponding request is
	 * available; if yes, then we found a candidate request.
	 */
	if (++hsq->next_tag != HSQ_INVALID_TAG) {
		slot = &hsq->slot[hsq->next_tag];
		if (slot->mrq)
			return;
	}

	/* Otherwise we should iterate over all slots to find an available tag. */
	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
		slot = &hsq->slot[tag];
		if (slot->mrq)
			break;
	}

	if (tag == HSQ_NUM_SLOTS)
		tag = HSQ_INVALID_TAG;

	hsq->next_tag = tag;
}

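/*
 * Bookkeeping after a request completes: pick the next tag, wake up anyone
 * waiting for the queue to drain, and pump the next request unless error
 * recovery halted the queue.
 */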
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump a new request in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump a new request to the host controller as fast as
	 * possible, after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/*
	 * Clear the current completed slot's request to make room for a new
	 * request.
	 */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);

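/* Halt dispatching of new requests while the core performs error recovery. */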
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}

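/*
 * Leave recovery mode and restart dispatching of any requests that were
 * queued while the recovery was running.
 */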
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new requests if there are requests pending in the
	 * software queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

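/*
 * Queue a request into the slot indexed by its tag and kick the pump.
 * Returns -ESHUTDOWN if the queue is disabled, or -EBUSY while error
 * recovery is in progress.
 */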
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Set the next tag to the current request's tag if there is no
	 * available next tag.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG)
		hsq->next_tag = tag;

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}

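/* Forward post-processing of a completed request to the host driver, if any. */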
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}

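/*
 * Check whether the queue has neither an in-flight nor a queued request.
 * A recovery halt also counts as idle so that waiters are not blocked
 * across recovery; in that case *ret is set to -EBUSY, otherwise to 0.
 */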
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}

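/*
 * Block until the software queue drains; returns -EBUSY if the wait was
 * satisfied because error recovery halted the queue.
 */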
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}

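/*
 * Wait up to 500ms for the queue to drain, then mark it disabled so no new
 * requests are accepted.
 */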
static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}

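/* Mark the software queue as enabled; @card is unused here. */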
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}

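/* Plug the software queue into the core's command queue engine interface. */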
static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};

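/**
 * mmc_hsq_init - initialize the MMC software queue for a host controller
 * @hsq: the software queue instance allocated by the host driver
 * @mmc: the host controller
 *
 * Allocate the slot array and wire the queue into the host's command queue
 * engine (CQE) interface via mmc_hsq_ops.
 *
 * Return 0 on success, or -ENOMEM if allocating the slots fails.
 */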
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);

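/*
 * Usage sketch (illustrative only; the surrounding probe code and variable
 * names are assumptions, not part of this file): a host driver allocates
 * the queue and hands it to mmc_hsq_init() during probe, e.g.
 *
 *	struct mmc_hsq *hsq;
 *
 *	hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
 *	if (!hsq)
 *		return -ENOMEM;
 *	ret = mmc_hsq_init(hsq, host->mmc);
 *
 * and later completes requests from its IRQ path with
 * mmc_hsq_finalize_request().
 */

/* Drain the queue and disable it across system suspend. */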
void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

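/* Re-enable the queue on resume; enabling does not need a card reference. */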
int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);

MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");