xref: /OK3568_Linux_fs/kernel/drivers/mmc/core/queue.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H

#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

enum mmc_issued {
	MMC_REQ_STARTED,
	MMC_REQ_BUSY,
	MMC_REQ_FAILED_TO_START,
	MMC_REQ_FINISHED,
};

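/*
 * Issue paths for a block request (see mmc_issue_type() declared below):
 * roughly, MMC_ISSUE_SYNC requests are issued and completed one at a time,
 * MMC_ISSUE_DCMD is used for CQE direct commands such as a cache flush, and
 * MMC_ISSUE_ASYNC read/write requests may be in flight concurrently.
 */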
enum mmc_issue_type {
	MMC_ISSUE_SYNC,
	MMC_ISSUE_DCMD,
	MMC_ISSUE_ASYNC,
	MMC_ISSUE_MAX,
};

static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

struct mmc_queue_req;

static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
{
	return blk_mq_rq_from_pdu(mqr);
}
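/*
 * struct mmc_queue_req is the blk-mq per-request payload (PDU), so the two
 * helpers above simply convert between a block layer request and the MMC
 * private data attached to it. Minimal sketch ('req' is an illustrative
 * variable, not part of this header):
 *
 *	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
 *	struct request *rq = mmc_queue_req_to_req(mqrq);	rq == req again
 */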

struct mmc_blk_data;
struct mmc_blk_ioc_data;

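/*
 * One block layer request translated into the MMC commands that carry it:
 * the enclosing mmc_request, an optional SET_BLOCK_COUNT (sbc), the data
 * command itself (cmd), an optional STOP_TRANSMISSION (stop), and the data
 * transfer description.
 */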
struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	sbc;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
};

/**
 * enum mmc_drv_op - enumerates the operations in the mmc_queue_req
 * @MMC_DRV_OP_IOCTL: ioctl operation
 * @MMC_DRV_OP_IOCTL_RPMB: RPMB-oriented ioctl operation
 * @MMC_DRV_OP_BOOT_WP: write protect boot partitions
 * @MMC_DRV_OP_GET_CARD_STATUS: get card status
 * @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card
 */
enum mmc_drv_op {
	MMC_DRV_OP_IOCTL,
	MMC_DRV_OP_IOCTL_RPMB,
	MMC_DRV_OP_BOOT_WP,
	MMC_DRV_OP_GET_CARD_STATUS,
	MMC_DRV_OP_GET_EXT_CSD,
};

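/*
 * Per-request driver state, allocated by blk-mq as the PDU attached to each
 * struct request (see req_to_mmc_queue_req() above). For normal I/O it holds
 * the prepared commands (brq) and the scatterlist; for driver-internal
 * operations it carries the drv_op code, its argument data and result.
 */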
struct mmc_queue_req {
	struct mmc_blk_request	brq;
	struct scatterlist	*sg;
	enum mmc_drv_op		drv_op;
	int			drv_op_result;
	void			*drv_op_data;
	unsigned int		ioc_count;
	int			retries;
};

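/*
 * State for one MMC block request queue, shared between the block driver and
 * the blk-mq callbacks: in_flight[] counts dispatched requests per issue type
 * under @lock, the MMC_CQE_* flags in @cqe_busy throttle dispatch when a
 * command queue engine is in use, and the recovery/complete work items handle
 * error recovery and request completion outside the issue path.
 */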
struct mmc_queue {
	struct mmc_card		*card;
	struct mmc_ctx		ctx;
	struct blk_mq_tag_set	tag_set;
	struct mmc_blk_data	*blkdata;
	struct request_queue	*queue;
	spinlock_t		lock;
	int			in_flight[MMC_ISSUE_MAX];
	unsigned int		cqe_busy;
#define MMC_CQE_DCMD_BUSY	BIT(0)
#define MMC_CQE_QUEUE_FULL	BIT(1)
	bool			busy;
	bool			use_cqe;
	bool			recovery_needed;
	bool			in_recovery;
	bool			rw_wait;
	bool			waiting;
	struct work_struct	recovery_work;
	wait_queue_head_t	wait;
	struct request		*recovery_req;
	struct request		*complete_req;
	struct mutex		complete_lock;
	struct work_struct	complete_work;
};

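/*
 * Queue setup and teardown entry points used by the MMC block driver. A
 * rough lifecycle sketch ('mq' and 'card' are illustrative locals, not part
 * of this header):
 *
 *	ret = mmc_init_queue(&mq, card);	at probe/alloc time
 *	mmc_queue_suspend(&mq);			stop dispatch before suspend
 *	mmc_queue_resume(&mq);			restart dispatch on resume
 *	mmc_cleanup_queue(&mq);			on removal
 */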
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
				     struct mmc_queue_req *);

void mmc_cqe_check_busy(struct mmc_queue *mq);
void mmc_cqe_recovery_notifier(struct mmc_request *mrq);

enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);

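/*
 * In-flight counters derived from mmc_queue::in_flight[]:
 * mmc_tot_in_flight() counts every dispatched request, while mmc_cqe_qcnt()
 * counts only the DCMD and async requests, i.e. roughly those occupying CQE
 * task slots.
 */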
static inline int mmc_tot_in_flight(struct mmc_queue *mq)
{
	return mq->in_flight[MMC_ISSUE_SYNC] +
	       mq->in_flight[MMC_ISSUE_DCMD] +
	       mq->in_flight[MMC_ISSUE_ASYNC];
}

static inline int mmc_cqe_qcnt(struct mmc_queue *mq)
{
	return mq->in_flight[MMC_ISSUE_DCMD] +
	       mq->in_flight[MMC_ISSUE_ASYNC];
}

#endif