xref: /OK3568_Linux_fs/kernel/include/linux/mhi.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun #ifndef _MHI_H_
7*4882a593Smuzhiyun #define _MHI_H_
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/device.h>
10*4882a593Smuzhiyun #include <linux/dma-direction.h>
11*4882a593Smuzhiyun #include <linux/mutex.h>
12*4882a593Smuzhiyun #include <linux/skbuff.h>
13*4882a593Smuzhiyun #include <linux/slab.h>
14*4882a593Smuzhiyun #include <linux/spinlock.h>
15*4882a593Smuzhiyun #include <linux/wait.h>
16*4882a593Smuzhiyun #include <linux/workqueue.h>
17*4882a593Smuzhiyun 
/* Number of u32 segments holding the OEM PK hash read from the device (BHI) */
#define MHI_MAX_OEM_PK_HASH_SEGMENTS 16

/* Forward declarations of MHI core structures (defined in the MHI core) */
struct mhi_chan;
struct mhi_event;
struct mhi_ctxt;
struct mhi_cmd;
struct mhi_buf_info;
25*4882a593Smuzhiyun 
/**
 * enum mhi_callback - MHI callback
 * @MHI_CB_IDLE: MHI entered idle state
 * @MHI_CB_PENDING_DATA: New data available for client to process
 * @MHI_CB_LPM_ENTER: MHI host entered low power mode
 * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
 * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env
 * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
 * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
 * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state
 * @MHI_CB_BW_REQ: Received a bandwidth switch request from device
 *
 * These reasons are delivered through the status_cb() callbacks in
 * struct mhi_driver and struct mhi_controller.
 */
enum mhi_callback {
	MHI_CB_IDLE,
	MHI_CB_PENDING_DATA,
	MHI_CB_LPM_ENTER,
	MHI_CB_LPM_EXIT,
	MHI_CB_EE_RDDM,
	MHI_CB_EE_MISSION_MODE,
	MHI_CB_SYS_ERROR,
	MHI_CB_FATAL_ERROR,
	MHI_CB_BW_REQ,
};
49*4882a593Smuzhiyun 
/**
 * enum mhi_flags - Transfer flags
 * @MHI_EOB: End of buffer for bulk transfer
 * @MHI_EOT: End of transfer
 * @MHI_CHAIN: Linked transfer
 *
 * Bitmask values (may be OR'd together) describing a queued transfer buffer.
 */
enum mhi_flags {
	MHI_EOB = BIT(0),
	MHI_EOT = BIT(1),
	MHI_CHAIN = BIT(2),
};
61*4882a593Smuzhiyun 
/**
 * enum mhi_device_type - Device types
 * @MHI_DEVICE_XFER: Handles data transfer
 * @MHI_DEVICE_CONTROLLER: Control device
 *
 * Stored in the @dev_type field of struct mhi_device.
 */
enum mhi_device_type {
	MHI_DEVICE_XFER,
	MHI_DEVICE_CONTROLLER,
};
71*4882a593Smuzhiyun 
/**
 * enum mhi_ch_type - Channel types
 * @MHI_CH_TYPE_INVALID: Invalid channel type
 * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device
 * @MHI_CH_TYPE_INBOUND: Inbound channel from the device
 * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine
 *				   multiple packets and send them as a single
 *				   large packet to reduce CPU consumption
 *
 * The OUTBOUND/INBOUND values deliberately alias enum dma_data_direction so
 * a channel type can be used directly as a DMA direction.
 */
enum mhi_ch_type {
	MHI_CH_TYPE_INVALID = 0,
	MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE,
	MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE,
	MHI_CH_TYPE_INBOUND_COALESCED = 3,
};
87*4882a593Smuzhiyun 
/**
 * struct image_info - Firmware and RDDM table
 * @mhi_buf: Buffer for firmware and RDDM table
 * @entries: # of entries in the @mhi_buf table
 */
struct image_info {
	struct mhi_buf *mhi_buf;
	/* private: from internal.h */
	struct bhi_vec_entry *bhi_vec;
	/* public: */
	u32 entries;
};
100*4882a593Smuzhiyun 
/**
 * struct mhi_link_info - BW requirement
 * @target_link_speed: Link speed as defined by TLS bits in LinkControl reg
 * @target_link_width: Link width as defined by NLW bits in LinkStatus reg
 */
struct mhi_link_info {
	unsigned int target_link_speed;
	unsigned int target_link_width;
};
110*4882a593Smuzhiyun 
/**
 * enum mhi_ee_type - Execution environment types
 * @MHI_EE_PBL: Primary Bootloader
 * @MHI_EE_SBL: Secondary Bootloader
 * @MHI_EE_AMSS: Modem, aka the primary runtime EE
 * @MHI_EE_RDDM: Ram dump download mode
 * @MHI_EE_WFW: WLAN firmware mode
 * @MHI_EE_PTHRU: Passthrough
 * @MHI_EE_EDL: Embedded downloader
 * @MHI_EE_MAX_SUPPORTED: Highest supported EE (same value as @MHI_EE_EDL)
 * @MHI_EE_DISABLE_TRANSITION: Local (host-only) EE, not related to MHI spec
 * @MHI_EE_NOT_SUPPORTED: Execution environment not supported
 * @MHI_EE_MAX: Sentinel, number of EE values
 */
enum mhi_ee_type {
	MHI_EE_PBL,
	MHI_EE_SBL,
	MHI_EE_AMSS,
	MHI_EE_RDDM,
	MHI_EE_WFW,
	MHI_EE_PTHRU,
	MHI_EE_EDL,
	MHI_EE_MAX_SUPPORTED = MHI_EE_EDL,
	MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
	MHI_EE_NOT_SUPPORTED,
	MHI_EE_MAX,
};
134*4882a593Smuzhiyun 
/**
 * enum mhi_state - MHI states
 * @MHI_STATE_RESET: Reset state
 * @MHI_STATE_READY: Ready state
 * @MHI_STATE_M0: M0 state
 * @MHI_STATE_M1: M1 state
 * @MHI_STATE_M2: M2 state
 * @MHI_STATE_M3: M3 state
 * @MHI_STATE_M3_FAST: M3 Fast state
 * @MHI_STATE_BHI: BHI state
 * @MHI_STATE_SYS_ERR: System Error state
 * @MHI_STATE_MAX: Sentinel, one past the highest defined state value
 */
enum mhi_state {
	MHI_STATE_RESET = 0x0,
	MHI_STATE_READY = 0x1,
	MHI_STATE_M0 = 0x2,
	MHI_STATE_M1 = 0x3,
	MHI_STATE_M2 = 0x4,
	MHI_STATE_M3 = 0x5,
	MHI_STATE_M3_FAST = 0x6,
	MHI_STATE_BHI = 0x7,
	MHI_STATE_SYS_ERR = 0xFF,
	MHI_STATE_MAX,
};
159*4882a593Smuzhiyun 
/**
 * enum mhi_ch_ee_mask - Execution environment mask for channel
 * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE
 * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE
 * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE
 * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE
 * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE
 * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE
 * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE
 *
 * Each bit position corresponds to the matching enum mhi_ee_type value;
 * masks may be OR'd together for the @ee_mask of struct mhi_channel_config.
 */
enum mhi_ch_ee_mask {
	MHI_CH_EE_PBL = BIT(MHI_EE_PBL),
	MHI_CH_EE_SBL = BIT(MHI_EE_SBL),
	MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS),
	MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM),
	MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU),
	MHI_CH_EE_WFW = BIT(MHI_EE_WFW),
	MHI_CH_EE_EDL = BIT(MHI_EE_EDL),
};
179*4882a593Smuzhiyun 
/**
 * enum mhi_er_data_type - Event ring data types
 * @MHI_ER_DATA: Only client data over this ring
 * @MHI_ER_CTRL: MHI control data and client data
 *
 * Used in the @data_type field of struct mhi_event_config.
 */
enum mhi_er_data_type {
	MHI_ER_DATA,
	MHI_ER_CTRL,
};
189*4882a593Smuzhiyun 
/**
 * enum mhi_db_brst_mode - Doorbell mode
 * @MHI_DB_BRST_DISABLE: Burst mode disable
 * @MHI_DB_BRST_ENABLE: Burst mode enable
 *
 * Used for the @doorbell field of struct mhi_channel_config and the
 * @mode field of struct mhi_event_config.
 */
enum mhi_db_brst_mode {
	MHI_DB_BRST_DISABLE = 0x2,
	MHI_DB_BRST_ENABLE = 0x3,
};
199*4882a593Smuzhiyun 
/**
 * struct mhi_channel_config - Channel configuration structure for controller
 * @name: The name of this channel
 * @num: The number assigned to this channel
 * @num_elements: The number of elements that can be queued to this channel
 * @local_elements: The local ring length of the channel
 * @event_ring: The event ring index that services this channel
 * @dir: Direction that data may flow on this channel
 * @type: Channel type
 * @ee_mask: Execution Environment mask for this channel
 * @pollcfg: Polling configuration for burst mode.  0 is default.  milliseconds
 *	     for UL channels, multiple of 8 ring elements for DL channels
 * @doorbell: Doorbell mode
 * @lpm_notify: The channel master requires low power mode notifications
 * @offload_channel: The client manages the channel completely
 * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
 * @auto_queue: Framework will automatically queue buffers for DL traffic
 * @auto_start: Automatically start (open) this channel
 * @wake_capable: Channel capable of waking up the system
 */
struct mhi_channel_config {
	char *name;
	u32 num;
	u32 num_elements;
	u32 local_elements;
	u32 event_ring;
	enum dma_data_direction dir;
	enum mhi_ch_type type;
	u32 ee_mask;
	u32 pollcfg;
	enum mhi_db_brst_mode doorbell;
	bool lpm_notify;
	bool offload_channel;
	bool doorbell_mode_switch;
	bool auto_queue;
	bool auto_start;
	bool wake_capable;
};
238*4882a593Smuzhiyun 
/**
 * struct mhi_event_config - Event ring configuration structure for controller
 * @num_elements: The number of elements that can be queued to this ring
 * @irq_moderation_ms: Delay irq for additional events to be aggregated
 * @irq: IRQ associated with this ring
 * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring
 * @priority: Priority of this ring. Use 1 for now
 * @mode: Doorbell mode
 * @data_type: Type of data this ring will process
 * @hardware_event: This ring is associated with hardware channels
 * @client_managed: This ring is client managed
 * @offload_channel: This ring is associated with an offloaded channel
 *
 * Consumed by mhi_register_controller() via the @event_cfg array of
 * struct mhi_controller_config.
 */
struct mhi_event_config {
	u32 num_elements;
	u32 irq_moderation_ms;
	u32 irq;
	u32 channel;
	u32 priority;
	enum mhi_db_brst_mode mode;
	enum mhi_er_data_type data_type;
	bool hardware_event;
	bool client_managed;
	bool offload_channel;
};
264*4882a593Smuzhiyun 
/**
 * struct mhi_controller_config - Root MHI controller configuration
 * @max_channels: Maximum number of channels supported
 * @timeout_ms: Timeout value for operations. 0 means use default
 * @buf_len: Size of automatically allocated buffers. 0 means use default
 * @num_channels: Number of channels defined in @ch_cfg
 * @ch_cfg: Array of defined channels
 * @num_events: Number of event rings defined in @event_cfg
 * @event_cfg: Array of defined event rings
 * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access
 * @m2_no_db: Host is not allowed to ring DB in M2 state
 *
 * Passed by the controller driver to mhi_register_controller().
 */
struct mhi_controller_config {
	u32 max_channels;
	u32 timeout_ms;
	u32 buf_len;
	u32 num_channels;
	const struct mhi_channel_config *ch_cfg;
	u32 num_events;
	const struct mhi_event_config *event_cfg;
	bool use_bounce_buf;
	bool m2_no_db;
};
288*4882a593Smuzhiyun 
/**
 * struct mhi_controller - Master MHI controller structure
 * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
 *             controller (required)
 * @mhi_dev: MHI device instance for the controller
 * @debugfs_dentry: MHI controller debugfs directory
 * @regs: Base address of MHI MMIO register space (required)
 * @bhi: Points to base of MHI BHI register space
 * @bhie: Points to base of MHI BHIe register space
 * @wake_db: MHI WAKE doorbell register address
 * @iova_start: IOMMU starting address for data (required)
 * @iova_stop: IOMMU stop address for data (required)
 * @fw_image: Firmware image name for normal booting (required)
 * @edl_image: Firmware image name for emergency download mode (optional)
 * @rddm_size: RAM dump size that host should allocate for debugging purpose
 * @sbl_size: SBL image size downloaded through BHIe (optional)
 * @seg_len: BHIe vector size (optional)
 * @fbc_image: Points to firmware image buffer
 * @rddm_image: Points to RAM dump buffer
 * @mhi_chan: Points to the channel configuration table
 * @lpm_chans: List of channels that require LPM notifications
 * @irq: base irq # to request (required)
 * @max_chan: Maximum number of channels the controller supports
 * @total_ev_rings: Total # of event rings allocated
 * @hw_ev_rings: Number of hardware event rings
 * @sw_ev_rings: Number of software event rings
 * @nr_irqs: Number of IRQ allocated by bus master (required)
 * @family_number: MHI controller family number
 * @device_number: MHI controller device number
 * @major_version: MHI controller major revision number
 * @minor_version: MHI controller minor revision number
 * @serial_number: MHI controller serial number obtained from BHI
 * @oem_pk_hash: MHI controller OEM PK Hash obtained from BHI
 * @mhi_event: MHI event ring configurations table
 * @mhi_cmd: MHI command ring configurations table
 * @mhi_ctxt: MHI device context, shared memory between host and device
 * @pm_mutex: Mutex for suspend/resume operation
 * @pm_lock: Lock for protecting MHI power management state
 * @timeout_ms: Timeout in ms for state transitions
 * @pm_state: MHI power management state
 * @db_access: DB access states
 * @ee: MHI device execution environment
 * @dev_state: MHI device state
 * @dev_wake: Device wakeup count
 * @pending_pkts: Pending packets for the controller
 * @M0: Counter tracking number of device transitions into M0 state
 * @M2: Counter tracking number of device transitions into M2 state
 * @M3: Counter tracking number of device transitions into M3 state
 * @transition_list: List of MHI state transitions
 * @transition_lock: Lock for protecting MHI state transition list
 * @wlock: Lock for protecting device wakeup
 * @mhi_link_info: Device bandwidth info
 * @st_worker: State transition worker
 * @state_event: State change event
 * @status_cb: CB function to notify power states of the device (required)
 * @wake_get: CB function to assert device wake (optional)
 * @wake_put: CB function to de-assert device wake (optional)
 * @wake_toggle: CB function to assert and de-assert device wake (optional)
 * @runtime_get: CB function to controller runtime resume (required)
 * @runtime_put: CB function to decrement pm usage (required)
 * @map_single: CB function to create TRE buffer
 * @unmap_single: CB function to destroy TRE buffer
 * @read_reg: Read a MHI register via the physical link (required)
 * @write_reg: Write a MHI register via the physical link (required)
 * @buffer_len: Bounce buffer length
 * @bounce_buf: Use of bounce buffer
 * @fbc_download: MHI host needs to do complete image transfer (optional)
 * @pre_init: MHI host needs to do pre-initialization before power up
 * @wake_set: Device wakeup set flag
 *
 * Fields marked as (required) need to be populated by the controller driver
 * before calling mhi_register_controller(). For the fields marked as (optional)
 * they can be populated depending on the usecase.
 *
 * The following fields are present for the purpose of implementing any device
 * specific quirks or customizations for specific MHI revisions used in device
 * by the controller drivers. The MHI stack will just populate these fields
 * during mhi_register_controller():
 *  family_number
 *  device_number
 *  major_version
 *  minor_version
 */
struct mhi_controller {
	struct device *cntrl_dev;
	struct mhi_device *mhi_dev;
	struct dentry *debugfs_dentry;
	void __iomem *regs;
	void __iomem *bhi;
	void __iomem *bhie;
	void __iomem *wake_db;

	dma_addr_t iova_start;
	dma_addr_t iova_stop;
	const char *fw_image;
	const char *edl_image;
	size_t rddm_size;
	size_t sbl_size;
	size_t seg_len;
	struct image_info *fbc_image;
	struct image_info *rddm_image;
	struct mhi_chan *mhi_chan;
	struct list_head lpm_chans;
	int *irq;
	u32 max_chan;
	u32 total_ev_rings;
	u32 hw_ev_rings;
	u32 sw_ev_rings;
	u32 nr_irqs;
	u32 family_number;
	u32 device_number;
	u32 major_version;
	u32 minor_version;
	u32 serial_number;
	u32 oem_pk_hash[MHI_MAX_OEM_PK_HASH_SEGMENTS];

	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	struct mhi_ctxt *mhi_ctxt;

	struct mutex pm_mutex;
	rwlock_t pm_lock;
	u32 timeout_ms;
	u32 pm_state;
	u32 db_access;
	enum mhi_ee_type ee;
	enum mhi_state dev_state;
	atomic_t dev_wake;
	atomic_t pending_pkts;
	u32 M0, M2, M3;
	struct list_head transition_list;
	spinlock_t transition_lock;
	spinlock_t wlock;
	struct mhi_link_info mhi_link_info;
	struct work_struct st_worker;
	wait_queue_head_t state_event;

	void (*status_cb)(struct mhi_controller *mhi_cntrl,
			  enum mhi_callback cb);
	void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
	void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
	void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
	int (*runtime_get)(struct mhi_controller *mhi_cntrl);
	void (*runtime_put)(struct mhi_controller *mhi_cntrl);
	int (*map_single)(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf);
	void (*unmap_single)(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf);
	int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
			u32 *out);
	void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
			  u32 val);

	size_t buffer_len;
	bool bounce_buf;
	bool fbc_download;
	bool pre_init;
	bool wake_set;
};
446*4882a593Smuzhiyun 
/**
 * struct mhi_device - Structure representing an MHI device which binds
 *                     to channels or is associated with controllers
 * @id: Pointer to MHI device ID struct
 * @name: Name of the associated MHI device
 * @mhi_cntrl: Controller the device belongs to
 * @ul_chan: UL channel for the device
 * @dl_chan: DL channel for the device
 * @dev: Driver model device node for the MHI device
 * @dev_type: MHI device type
 * @ul_chan_id: MHI channel id for UL transfer
 * @dl_chan_id: MHI channel id for DL transfer
 * @dev_wake: Device wakeup counter
 *
 * Use to_mhi_device() to recover a struct mhi_device from its embedded @dev.
 */
struct mhi_device {
	const struct mhi_device_id *id;
	const char *name;
	struct mhi_controller *mhi_cntrl;
	struct mhi_chan *ul_chan;
	struct mhi_chan *dl_chan;
	struct device dev;
	enum mhi_device_type dev_type;
	int ul_chan_id;
	int dl_chan_id;
	u32 dev_wake;
};
473*4882a593Smuzhiyun 
/**
 * struct mhi_result - Completed buffer information
 * @buf_addr: Address of data buffer
 * @bytes_xferd: # of bytes transferred
 * @dir: Channel direction
 * @transaction_status: Status of last transaction
 *
 * Passed to client drivers through the ul_xfer_cb()/dl_xfer_cb()
 * callbacks of struct mhi_driver.
 */
struct mhi_result {
	void *buf_addr;
	size_t bytes_xferd;
	enum dma_data_direction dir;
	int transaction_status;
};
487*4882a593Smuzhiyun 
/**
 * struct mhi_buf - MHI Buffer description
 * @buf: Virtual address of the buffer
 * @name: Buffer label. For offload channel, configurations name must be:
 *        ECA - Event context array data
 *        CCA - Channel context array data
 * @dma_addr: IOMMU address of the buffer
 * @len: # of bytes
 */
struct mhi_buf {
	void *buf;
	const char *name;
	dma_addr_t dma_addr;
	size_t len;
};
503*4882a593Smuzhiyun 
/**
 * struct mhi_driver - Structure representing a MHI client driver
 * @id_table: Pointer to MHI device ID table this driver matches against
 * @probe: CB function for client driver probe function
 * @remove: CB function for client driver remove function
 * @ul_xfer_cb: CB function for UL data transfer
 * @dl_xfer_cb: CB function for DL data transfer
 * @status_cb: CB functions for asynchronous status
 * @driver: Device driver model driver
 */
struct mhi_driver {
	const struct mhi_device_id *id_table;
	int (*probe)(struct mhi_device *mhi_dev,
		     const struct mhi_device_id *id);
	void (*remove)(struct mhi_device *mhi_dev);
	void (*ul_xfer_cb)(struct mhi_device *mhi_dev,
			   struct mhi_result *result);
	void (*dl_xfer_cb)(struct mhi_device *mhi_dev,
			   struct mhi_result *result);
	void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb);
	struct device_driver driver;
};
525*4882a593Smuzhiyun 
/* Recover the containing MHI structures from their embedded driver-model members */
#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
528*4882a593Smuzhiyun 
/**
 * mhi_alloc_controller - Allocate the MHI Controller structure
 * Allocate the mhi_controller structure using zero initialized memory
 *
 * Return: Pointer to the new controller, or NULL on allocation failure
 */
struct mhi_controller *mhi_alloc_controller(void);

/**
 * mhi_free_controller - Free the MHI Controller structure
 * Free the mhi_controller structure which was previously allocated
 * @mhi_cntrl: MHI controller to free
 */
void mhi_free_controller(struct mhi_controller *mhi_cntrl);

/**
 * mhi_register_controller - Register MHI controller
 * @mhi_cntrl: MHI controller to register
 * @config: Configuration to use for the controller
 *
 * Return: 0 on success, a negative error code on failure
 */
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config);

/**
 * mhi_unregister_controller - Unregister MHI controller
 * @mhi_cntrl: MHI controller to unregister
 */
void mhi_unregister_controller(struct mhi_controller *mhi_cntrl);
554*4882a593Smuzhiyun 
/*
 * module_mhi_driver() - Helper macro for drivers that don't do
 * anything special other than using default mhi_driver_register() and
 * mhi_driver_unregister().  This eliminates a lot of boilerplate.
 * Each module may only use this macro once.
 */
#define module_mhi_driver(mhi_drv) \
	module_driver(mhi_drv, mhi_driver_register, \
		      mhi_driver_unregister)

/*
 * Macro to avoid include chaining to get THIS_MODULE
 */
#define mhi_driver_register(mhi_drv) \
	__mhi_driver_register(mhi_drv, THIS_MODULE)

/**
 * __mhi_driver_register - Register driver with MHI framework
 * @mhi_drv: Driver associated with the device
 * @owner: The module owner
 *
 * Return: 0 on success, a negative error code on failure
 */
int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner);

/**
 * mhi_driver_unregister - Unregister a driver for mhi_devices
 * @mhi_drv: Driver associated with the device
 */
void mhi_driver_unregister(struct mhi_driver *mhi_drv);
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun /**
585*4882a593Smuzhiyun  * mhi_set_mhi_state - Set MHI device state
586*4882a593Smuzhiyun  * @mhi_cntrl: MHI controller
587*4882a593Smuzhiyun  * @state: State to set
588*4882a593Smuzhiyun  */
589*4882a593Smuzhiyun void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
590*4882a593Smuzhiyun 		       enum mhi_state state);
591*4882a593Smuzhiyun 
592*4882a593Smuzhiyun /**
593*4882a593Smuzhiyun  * mhi_notify - Notify the MHI client driver about client device status
594*4882a593Smuzhiyun  * @mhi_dev: MHI device instance
595*4882a593Smuzhiyun  * @cb_reason: MHI callback reason
596*4882a593Smuzhiyun  */
597*4882a593Smuzhiyun void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun /**
600*4882a593Smuzhiyun  * mhi_prepare_for_power_up - Do pre-initialization before power up.
601*4882a593Smuzhiyun  *                            This is optional, call this before power up if
602*4882a593Smuzhiyun  *                            the controller does not want bus framework to
603*4882a593Smuzhiyun  *                            automatically free any allocated memory during
604*4882a593Smuzhiyun  *                            shutdown process.
605*4882a593Smuzhiyun  * @mhi_cntrl: MHI controller
606*4882a593Smuzhiyun  */
607*4882a593Smuzhiyun int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
608*4882a593Smuzhiyun 
609*4882a593Smuzhiyun /**
610*4882a593Smuzhiyun  * mhi_async_power_up - Start MHI power up sequence
611*4882a593Smuzhiyun  * @mhi_cntrl: MHI controller
612*4882a593Smuzhiyun  */
613*4882a593Smuzhiyun int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
614*4882a593Smuzhiyun 
615*4882a593Smuzhiyun /**
616*4882a593Smuzhiyun  * mhi_sync_power_up - Start MHI power up sequence and wait till the device
617*4882a593Smuzhiyun  *                     enters valid EE state
618*4882a593Smuzhiyun  * @mhi_cntrl: MHI controller
619*4882a593Smuzhiyun  */
620*4882a593Smuzhiyun int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
621*4882a593Smuzhiyun 
622*4882a593Smuzhiyun /**
623*4882a593Smuzhiyun  * mhi_power_down - Start MHI power down sequence
624*4882a593Smuzhiyun  * @mhi_cntrl: MHI controller
625*4882a593Smuzhiyun  * @graceful: Link is still accessible, so do a graceful shutdown process
626*4882a593Smuzhiyun  */
627*4882a593Smuzhiyun void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
628*4882a593Smuzhiyun 
629*4882a593Smuzhiyun /**
630*4882a593Smuzhiyun  * mhi_unprepare_after_power_down - Free any allocated memory after power down
631*4882a593Smuzhiyun  * @mhi_cntrl: MHI controller
632*4882a593Smuzhiyun  */
633*4882a593Smuzhiyun void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun /**
636*4882a593Smuzhiyun  * mhi_pm_suspend - Move MHI into a suspended state
637*4882a593Smuzhiyun  * @mhi_cntrl: MHI controller
638*4882a593Smuzhiyun  */
639*4882a593Smuzhiyun int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun /**
642*4882a593Smuzhiyun  * mhi_pm_resume - Resume MHI from suspended state
643*4882a593Smuzhiyun  * @mhi_cntrl: MHI controller
644*4882a593Smuzhiyun  */
645*4882a593Smuzhiyun int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun /**
648*4882a593Smuzhiyun  * mhi_download_rddm_img - Download ramdump image from device for
649*4882a593Smuzhiyun  *                         debugging purpose.
650*4882a593Smuzhiyun  * @mhi_cntrl: MHI controller
651*4882a593Smuzhiyun  * @in_panic: Download rddm image during kernel panic
652*4882a593Smuzhiyun  */
653*4882a593Smuzhiyun int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun /**
656*4882a593Smuzhiyun  * mhi_force_rddm_mode - Force device into rddm mode
657*4882a593Smuzhiyun  * @mhi_cntrl: MHI controller
658*4882a593Smuzhiyun  */
659*4882a593Smuzhiyun int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
660*4882a593Smuzhiyun 
661*4882a593Smuzhiyun /**
662*4882a593Smuzhiyun  * mhi_get_mhi_state - Get MHI state of the device
663*4882a593Smuzhiyun  * @mhi_cntrl: MHI controller
664*4882a593Smuzhiyun  */
665*4882a593Smuzhiyun enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun /**
668*4882a593Smuzhiyun  * mhi_device_get - Disable device low power mode
669*4882a593Smuzhiyun  * @mhi_dev: Device associated with the channel
670*4882a593Smuzhiyun  */
671*4882a593Smuzhiyun void mhi_device_get(struct mhi_device *mhi_dev);
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun /**
674*4882a593Smuzhiyun  * mhi_device_get_sync - Disable device low power mode. Synchronously
675*4882a593Smuzhiyun  *                       take the controller out of suspended state
676*4882a593Smuzhiyun  * @mhi_dev: Device associated with the channel
677*4882a593Smuzhiyun  */
678*4882a593Smuzhiyun int mhi_device_get_sync(struct mhi_device *mhi_dev);
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun /**
681*4882a593Smuzhiyun  * mhi_device_put - Re-enable device low power mode
682*4882a593Smuzhiyun  * @mhi_dev: Device associated with the channel
683*4882a593Smuzhiyun  */
684*4882a593Smuzhiyun void mhi_device_put(struct mhi_device *mhi_dev);
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun /**
687*4882a593Smuzhiyun  * mhi_prepare_for_transfer - Setup channel for data transfer
688*4882a593Smuzhiyun  * @mhi_dev: Device associated with the channels
689*4882a593Smuzhiyun  */
690*4882a593Smuzhiyun int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun /**
693*4882a593Smuzhiyun  * mhi_unprepare_from_transfer - Unprepare the channels
694*4882a593Smuzhiyun  * @mhi_dev: Device associated with the channels
695*4882a593Smuzhiyun  */
696*4882a593Smuzhiyun void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
697*4882a593Smuzhiyun 
698*4882a593Smuzhiyun /**
699*4882a593Smuzhiyun  * mhi_poll - Poll for any available data in DL direction
700*4882a593Smuzhiyun  * @mhi_dev: Device associated with the channels
701*4882a593Smuzhiyun  * @budget: # of events to process
702*4882a593Smuzhiyun  */
703*4882a593Smuzhiyun int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun /**
706*4882a593Smuzhiyun  * mhi_queue_dma - Send or receive DMA mapped buffers from client device
707*4882a593Smuzhiyun  *                 over MHI channel
708*4882a593Smuzhiyun  * @mhi_dev: Device associated with the channels
709*4882a593Smuzhiyun  * @dir: DMA direction for the channel
710*4882a593Smuzhiyun  * @mhi_buf: Buffer for holding the DMA mapped data
711*4882a593Smuzhiyun  * @len: Buffer length
712*4882a593Smuzhiyun  * @mflags: MHI transfer flags used for the transfer
713*4882a593Smuzhiyun  */
714*4882a593Smuzhiyun int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
715*4882a593Smuzhiyun 		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags);
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun /**
718*4882a593Smuzhiyun  * mhi_queue_buf - Send or receive raw buffers from client device over MHI
719*4882a593Smuzhiyun  *                 channel
720*4882a593Smuzhiyun  * @mhi_dev: Device associated with the channels
721*4882a593Smuzhiyun  * @dir: DMA direction for the channel
722*4882a593Smuzhiyun  * @buf: Buffer for holding the data
723*4882a593Smuzhiyun  * @len: Buffer length
724*4882a593Smuzhiyun  * @mflags: MHI transfer flags used for the transfer
725*4882a593Smuzhiyun  */
726*4882a593Smuzhiyun int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
727*4882a593Smuzhiyun 		  void *buf, size_t len, enum mhi_flags mflags);
728*4882a593Smuzhiyun 
729*4882a593Smuzhiyun /**
730*4882a593Smuzhiyun  * mhi_queue_skb - Send or receive SKBs from client device over MHI channel
731*4882a593Smuzhiyun  * @mhi_dev: Device associated with the channels
732*4882a593Smuzhiyun  * @dir: DMA direction for the channel
733*4882a593Smuzhiyun  * @skb: Buffer for holding SKBs
734*4882a593Smuzhiyun  * @len: Buffer length
735*4882a593Smuzhiyun  * @mflags: MHI transfer flags used for the transfer
736*4882a593Smuzhiyun  */
737*4882a593Smuzhiyun int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
738*4882a593Smuzhiyun 		  struct sk_buff *skb, size_t len, enum mhi_flags mflags);
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun #endif /* _MHI_H_ */
741