// SPDX-License-Identifier: GPL-2.0-only
/*
 * cmt_speech.c - HSI CMT speech driver
 *
 * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
 *
 * Contact: Kai Vehmanen <kai.vehmanen@nokia.com>
 * Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/pm_qos.h>
#include <linux/hsi/hsi.h>
#include <linux/hsi/ssi_protocol.h>
#include <linux/hsi/cs-protocol.h>

#define CS_MMAP_SIZE	PAGE_SIZE

struct char_queue {
	struct list_head	list;
	u32			msg;
};

struct cs_char {
	unsigned int		opened;
	struct hsi_client	*cl;
	struct cs_hsi_iface	*hi;
	struct list_head	chardev_queue;
	struct list_head	dataind_queue;
	int			dataind_pending;
	/* mmap things */
	unsigned long		mmap_base;
	unsigned long		mmap_size;
	spinlock_t		lock;
	struct fasync_struct	*async_queue;
	wait_queue_head_t	wait;
	/* hsi channel ids */
	int			channel_id_cmd;
	int			channel_id_data;
};

#define SSI_CHANNEL_STATE_READING	1
#define SSI_CHANNEL_STATE_WRITING	(1 << 1)
#define SSI_CHANNEL_STATE_POLL		(1 << 2)
#define SSI_CHANNEL_STATE_ERROR		(1 << 3)

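/*
 * Routing of command words written by userspace: TARGET_MASK picks out
 * the domain nibble of the command word (the bit at CS_DOMAIN_SHIFT).
 * Commands with the domain bit set are forwarded to the remote peer
 * over the control channel; all others are handled locally.
 */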
#define TARGET_MASK			0xf000000
#define TARGET_REMOTE			(1 << CS_DOMAIN_SHIFT)
#define TARGET_LOCAL			0

/* Number of pre-allocated command buffers */
#define CS_MAX_CMDS			4

/*
 * During data transfers, transactions must be handled
 * within 20ms (fixed value in cmtspeech HSI protocol)
 */
#define CS_QOS_LATENCY_FOR_DATA_USEC	20000

/* Timeout to wait for pending HSI transfers to complete */
#define CS_HSI_TRANSFER_TIMEOUT_MS	500


#define RX_PTR_BOUNDARY_SHIFT		8
#define RX_PTR_MAX_SHIFT		(RX_PTR_BOUNDARY_SHIFT + \
						CS_MAX_BUFFERS_SHIFT)
struct cs_hsi_iface {
	struct hsi_client		*cl;
	struct hsi_client		*master;

	unsigned int			iface_state;
	unsigned int			wakeline_state;
	unsigned int			control_state;
	unsigned int			data_state;

	/* state exposed to application */
	struct cs_mmap_config_block	*mmap_cfg;

	unsigned long			mmap_base;
	unsigned long			mmap_size;

	unsigned int			rx_slot;
	unsigned int			tx_slot;

	/* note: for security reasons, we do not trust the contents of
	 * mmap_cfg, but instead duplicate the variables here */
	unsigned int			buf_size;
	unsigned int			rx_bufs;
	unsigned int			tx_bufs;
	unsigned int			rx_ptr_boundary;
	unsigned int			rx_offsets[CS_MAX_BUFFERS];
	unsigned int			tx_offsets[CS_MAX_BUFFERS];

	/* size of aligned memory blocks */
	unsigned int			slot_size;
	unsigned int			flags;

	struct list_head		cmdqueue;

	struct hsi_msg			*data_rx_msg;
	struct hsi_msg			*data_tx_msg;
	wait_queue_head_t		datawait;

	struct pm_qos_request		pm_qos_req;

	spinlock_t			lock;
};

static struct cs_char cs_char_data;

static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);

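/*
 * Dummy function that exists only to host the BUILD_BUG_ON() below:
 * the build fails if the rolling rx pointer range (CS_MAX_BUFFERS
 * shifted up by RX_PTR_BOUNDARY_SHIFT) could overflow an unsigned int.
 * It is never called at runtime.
 */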
static inline void rx_ptr_shift_too_big(void)
{
	BUILD_BUG_ON((1LLU << RX_PTR_MAX_SHIFT) > UINT_MAX);
}

static void cs_notify(u32 message, struct list_head *head)
{
	struct char_queue *entry;

	spin_lock(&cs_char_data.lock);

	if (!cs_char_data.opened) {
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		dev_err(&cs_char_data.cl->device,
			"Can't allocate new entry for the queue.\n");
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry->msg = message;
	list_add_tail(&entry->list, head);

	spin_unlock(&cs_char_data.lock);

	wake_up_interruptible(&cs_char_data.wait);
	kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN);

out:
	return;
}

static u32 cs_pop_entry(struct list_head *head)
{
	struct char_queue *entry;
	u32 data;

	entry = list_entry(head->next, struct char_queue, list);
	data = entry->msg;
	list_del(&entry->list);
	kfree(entry);

	return data;
}

static void cs_notify_control(u32 message)
{
	cs_notify(message, &cs_char_data.chardev_queue);
}

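/*
 * Queue a data notification for userspace and trim the backlog: if
 * more than 'maxlength' indications are already pending (userspace is
 * not keeping up with the incoming frames), the oldest entries are
 * dropped.
 */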
static void cs_notify_data(u32 message, int maxlength)
{
	cs_notify(message, &cs_char_data.dataind_queue);

	spin_lock(&cs_char_data.lock);
	cs_char_data.dataind_pending++;
	while (cs_char_data.dataind_pending > maxlength &&
				!list_empty(&cs_char_data.dataind_queue)) {
		dev_dbg(&cs_char_data.cl->device,
			"data notification queue overrun (%u entries)\n",
			cs_char_data.dataind_pending);

		cs_pop_entry(&cs_char_data.dataind_queue);
		cs_char_data.dataind_pending--;
	}
	spin_unlock(&cs_char_data.lock);
}

static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	*data = cmd;
}

static inline u32 cs_get_cmd(struct hsi_msg *msg)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	return *data;
}

static void cs_release_cmd(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	list_add_tail(&msg->link, &hi->cmdqueue);
}

static void cs_cmd_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);

	dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n");

	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&hi->cl->device, "Cmd flushed while driver active\n");

	if (msg->ttype == HSI_MSG_READ)
		hi->control_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else if (msg->ttype == HSI_MSG_WRITE &&
			hi->control_state & SSI_CHANNEL_STATE_WRITING)
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;

	cs_release_cmd(msg);

	spin_unlock(&hi->lock);
}

static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface *ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	msg->destructor = cs_cmd_destructor;

	return msg;
}

static void cs_free_cmds(struct cs_hsi_iface *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}

static int cs_alloc_cmds(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	INIT_LIST_HEAD(&hi->cmdqueue);

	for (i = 0; i < CS_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = cs_char_data.channel_id_cmd;
		msg->context = hi;
		list_add_tail(&msg->link, &hi->cmdqueue);
	}

	return 0;

out:
	cs_free_cmds(hi);
	return -ENOMEM;
}

static void cs_hsi_data_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	const char *dir = (msg->ttype == HSI_MSG_READ) ? "TX" : "RX";

	dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir);

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&cs_char_data.cl->device,
			"Data %s flush while device active\n", dir);
	if (msg->ttype == HSI_MSG_READ)
		hi->data_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;

	msg->status = HSI_STATUS_COMPLETED;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);

	spin_unlock(&hi->lock);
}

static int cs_hsi_alloc_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *txmsg, *rxmsg;
	int res = 0;

	rxmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!rxmsg) {
		res = -ENOMEM;
		goto out1;
	}
	rxmsg->channel = cs_char_data.channel_id_data;
	rxmsg->destructor = cs_hsi_data_destructor;
	rxmsg->context = hi;

	txmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!txmsg) {
		res = -ENOMEM;
		goto out2;
	}
	txmsg->channel = cs_char_data.channel_id_data;
	txmsg->destructor = cs_hsi_data_destructor;
	txmsg->context = hi;

	hi->data_rx_msg = rxmsg;
	hi->data_tx_msg = txmsg;

	return 0;

out2:
	hsi_free_msg(rxmsg);
out1:
	return res;
}

static void cs_hsi_free_data_msg(struct hsi_msg *msg)
{
	WARN_ON(msg->status != HSI_STATUS_COMPLETED &&
					msg->status != HSI_STATUS_ERROR);
	hsi_free_msg(msg);
}

static void cs_hsi_free_data(struct cs_hsi_iface *hi)
{
	cs_hsi_free_data_msg(hi->data_rx_msg);
	cs_hsi_free_data_msg(hi->data_tx_msg);
}

static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi,
					struct hsi_msg *msg, const char *info,
					unsigned int *state)
{
	spin_lock(&hi->lock);
	dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
		info, msg->status, *state);
}

static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi)
{
	spin_unlock(&hi->lock);
}

static inline void __cs_hsi_error_read_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL);
}

static inline void __cs_hsi_error_write_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~SSI_CHANNEL_STATE_WRITING;
}

static void cs_hsi_control_read_error(struct cs_hsi_iface *hi,
							struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control read", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_read_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_control_write_error(struct cs_hsi_iface *hi,
							struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control write", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_write_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data read", &hi->data_state);
	__cs_hsi_error_read_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_write_error(struct cs_hsi_iface *hi,
							struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data write", &hi->data_state);
	__cs_hsi_error_write_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
{
	u32 cmd = cs_get_cmd(msg);
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);
	hi->control_state &= ~SSI_CHANNEL_STATE_READING;
	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control RX error detected\n");
		spin_unlock(&hi->lock);
		cs_hsi_control_read_error(hi, msg);
		goto out;
	}
	dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
	cs_release_cmd(msg);
	if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
		struct timespec64 tspec;
		struct cs_timestamp *tstamp =
			&hi->mmap_cfg->tstamp_rx_ctrl;

		ktime_get_ts64(&tspec);

		tstamp->tv_sec = (__u32) tspec.tv_sec;
		tstamp->tv_nsec = (__u32) tspec.tv_nsec;
	}
	spin_unlock(&hi->lock);

	cs_notify_control(cmd);

out:
	cs_hsi_read_on_control(hi);
}

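/*
 * Control reads are armed in two phases: cs_hsi_read_on_control()
 * queues a zero-descriptor "peek", and the completion below re-submits
 * the same message as a real one-word read once the controller reports
 * incoming data.
 */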
static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	int ret;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control peek RX error detected\n");
		cs_hsi_control_read_error(hi, msg);
		return;
	}

	WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING));

	dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}

static void cs_hsi_read_on_control(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_READING) {
		dev_err(&hi->cl->device, "Control read already pending (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "Control read error (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->control_state |= SSI_CHANNEL_STATE_READING;
	dev_dbg(&hi->cl->device, "Issuing RX on control\n");
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	msg->sgt.nents = 0;
	msg->complete = cs_hsi_peek_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}

static void cs_hsi_write_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
		cs_release_cmd(msg);
		spin_unlock(&hi->lock);
	} else if (msg->status == HSI_STATUS_ERROR) {
		cs_hsi_control_write_error(hi, msg);
	} else {
		dev_err(&hi->cl->device,
			"unexpected status in control write callback %d\n",
			msg->status);
	}
}

static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		spin_unlock(&hi->lock);
		return -EIO;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device,
			"Write still pending on control channel.\n");
		spin_unlock(&hi->lock);
		return -EBUSY;
	}
	hi->control_state |= SSI_CHANNEL_STATE_WRITING;
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	cs_set_cmd(msg, message);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_write_on_control_complete;
	dev_dbg(&hi->cl->device,
		"Sending control message %08X\n", message);
	ret = hsi_async_write(hi->cl, msg);
	if (ret) {
		dev_err(&hi->cl->device,
			"async_write failed with %d\n", ret);
		cs_hsi_control_write_error(hi, msg);
	}

	/*
	 * Make sure control read is always pending when issuing
	 * new control writes. This is needed as the controller
	 * may flush our messages if e.g. the peer device reboots
	 * unexpectedly (and we cannot directly resubmit a new read from
	 * the message destructor; see cs_cmd_destructor()).
	 */
	if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) {
		dev_err(&hi->cl->device, "Restarting control reads\n");
		cs_hsi_read_on_control(hi);
	}

	return 0;
}

static void cs_hsi_read_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 payload;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING));
	hi->data_state &= ~SSI_CHANNEL_STATE_READING;
	payload = CS_RX_DATA_RECEIVED;
	payload |= hi->rx_slot;
	hi->rx_slot++;
	hi->rx_slot %= hi->rx_ptr_boundary;
	/* expose current rx ptr in mmap area */
	hi->mmap_cfg->rx_ptr = hi->rx_slot;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);
	spin_unlock(&hi->lock);

	cs_notify_data(payload, hi->rx_bufs);
	cs_hsi_read_on_data(hi);
}

static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 *address;
	int ret;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}
	if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) {
		dev_err(&hi->cl->device, "Data received in invalid state\n");
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL));
	hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
	hi->data_state |= SSI_CHANNEL_STATE_READING;
	spin_unlock(&hi->lock);

	address = (u32 *)(hi->mmap_base +
				hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
	sg_init_one(msg->sgt.sgl, address, hi->buf_size);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_data_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_data_read_error(hi, msg);
}

/*
 * Read/write transaction is ongoing. Returns false if in
 * SSI_CHANNEL_STATE_POLL state.
 */
static inline int cs_state_xfer_active(unsigned int state)
{
	return (state & SSI_CHANNEL_STATE_WRITING) ||
		(state & SSI_CHANNEL_STATE_READING);
}

/*
 * No pending read/writes
 */
static inline int cs_state_idle(unsigned int state)
{
	return !(state & ~SSI_CHANNEL_STATE_ERROR);
}

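/*
 * Arm a data read. This mirrors the control path: a zero-length peek
 * moves the channel into POLL state, and cs_hsi_peek_on_data_complete()
 * then flips POLL to READING and reads one frame into the mmap slot at
 * rx_slot % rx_bufs.
 */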
static void cs_hsi_read_on_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *rxmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->data_state &
		(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL)) {
		dev_dbg(&hi->cl->device, "Data read already pending (%u)\n",
			hi->data_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->data_state |= SSI_CHANNEL_STATE_POLL;
	spin_unlock(&hi->lock);

	rxmsg = hi->data_rx_msg;
	sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0);
	rxmsg->sgt.nents = 0;
	rxmsg->complete = cs_hsi_peek_on_data_complete;

	ret = hsi_async_read(hi->cl, rxmsg);
	if (ret)
		cs_hsi_data_read_error(hi, rxmsg);
}

static void cs_hsi_write_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
		if (unlikely(waitqueue_active(&hi->datawait)))
			wake_up_interruptible(&hi->datawait);
		spin_unlock(&hi->lock);
	} else {
		cs_hsi_data_write_error(hi, msg);
	}
}

static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot)
{
	u32 *address;
	struct hsi_msg *txmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CONFIGURED) {
		dev_err(&hi->cl->device, "Not configured, aborting\n");
		ret = -EINVAL;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "HSI error, aborting\n");
		ret = -EIO;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device, "Write pending on data channel.\n");
		ret = -EBUSY;
		goto error;
	}
	hi->data_state |= SSI_CHANNEL_STATE_WRITING;
	spin_unlock(&hi->lock);

	hi->tx_slot = slot;
	address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]);
	txmsg = hi->data_tx_msg;
	sg_init_one(txmsg->sgt.sgl, address, hi->buf_size);
	txmsg->complete = cs_hsi_write_on_data_complete;
	ret = hsi_async_write(hi->cl, txmsg);
	if (ret)
		cs_hsi_data_write_error(hi, txmsg);

	return ret;

error:
	spin_unlock(&hi->lock);
	if (ret == -EIO)
		cs_hsi_data_write_error(hi, hi->data_tx_msg);

	return ret;
}

static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi)
{
	return hi->iface_state;
}

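/*
 * Entry point for command words written via cs_char_write(). Bottom
 * halves are disabled around the dispatch so that the plain spin_lock()
 * sections in the paths below cannot race with the HSI completion
 * callbacks, which run in softirq context.
 */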
static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd)
{
	int ret = 0;

	local_bh_disable();
	switch (cmd & TARGET_MASK) {
	case TARGET_REMOTE:
		ret = cs_hsi_write_on_control(hi, cmd);
		break;
	case TARGET_LOCAL:
		if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY)
			ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	local_bh_enable();

	return ret;
}

static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state)
{
	int change = 0;

	spin_lock_bh(&hi->lock);
	if (hi->wakeline_state != new_state) {
		hi->wakeline_state = new_state;
		change = 1;
		dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
			new_state, hi->cl);
	}
	spin_unlock_bh(&hi->lock);

	if (change) {
		if (new_state)
			ssip_slave_start_tx(hi->master);
		else
			ssip_slave_stop_tx(hi->master);
	}

	dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
		new_state, hi->cl);
}

static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs)
{
	hi->rx_bufs = rx_bufs;
	hi->tx_bufs = tx_bufs;
	hi->mmap_cfg->rx_bufs = rx_bufs;
	hi->mmap_cfg->tx_bufs = tx_bufs;

	if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) {
		/*
		 * For more robust overrun detection, let the rx
		 * pointer run in range 0..'boundary-1'. Boundary
		 * is a multiple of rx_bufs, and limited in max size
		 * by RX_PTR_MAX_SHIFT to allow for fast ptr-diff
		 * calculation.
		 */
		hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT);
		hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary;
	} else {
		hi->rx_ptr_boundary = hi->rx_bufs;
	}
}

static int check_buf_params(struct cs_hsi_iface *hi,
					const struct cs_buffer_config *buf_cfg)
{
	size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) *
					(buf_cfg->rx_bufs + buf_cfg->tx_bufs);
	size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	int r = 0;

	if (buf_cfg->rx_bufs > CS_MAX_BUFFERS ||
					buf_cfg->tx_bufs > CS_MAX_BUFFERS) {
		r = -EINVAL;
	} else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) {
		dev_err(&hi->cl->device,
			"No space for the requested buffer configuration\n");
		r = -ENOBUFS;
	}

	return r;
}

/*
 * Block until pending data transfers have completed.
 */
static int cs_hsi_data_sync(struct cs_hsi_iface *hi)
{
	int r = 0;

	spin_lock_bh(&hi->lock);

	if (!cs_state_xfer_active(hi->data_state)) {
		dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");
		goto out;
	}

	for (;;) {
		int s;
		DEFINE_WAIT(wait);

		if (!cs_state_xfer_active(hi->data_state))
			goto out;
		if (signal_pending(current)) {
			r = -ERESTARTSYS;
			goto out;
		}
		/*
		 * prepare_to_wait must be called with hi->lock held
		 * so that callbacks can check for waitqueue_active()
		 */
		prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_bh(&hi->lock);
		s = schedule_timeout(
			msecs_to_jiffies(CS_HSI_TRANSFER_TIMEOUT_MS));
		spin_lock_bh(&hi->lock);
		finish_wait(&hi->datawait, &wait);
		if (!s) {
			dev_dbg(&hi->cl->device,
				"hsi_data_sync timeout after %d ms\n",
				CS_HSI_TRANSFER_TIMEOUT_MS);
			r = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_bh(&hi->lock);
	dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r);

	return r;
}

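/*
 * Lay out the shared mmap area: the config block comes first, followed
 * by rx_bufs downlink (DL) slots and tx_bufs uplink (UL) slots, each
 * padded to L1 cache alignment. The offsets are mirrored into mmap_cfg
 * so that userspace can locate the buffers.
 */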
static void cs_hsi_data_enable(struct cs_hsi_iface *hi,
					struct cs_buffer_config *buf_cfg)
{
	unsigned int data_start, i;

	BUG_ON(hi->buf_size == 0);

	set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs);

	hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
	dev_dbg(&hi->cl->device,
			"setting slot size to %u, buf size %u, align %u\n",
			hi->slot_size, hi->buf_size, L1_CACHE_BYTES);

	data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	dev_dbg(&hi->cl->device,
			"setting data start at %u, cfg block %zu, align %u\n",
			data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES);

	for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) {
		hi->rx_offsets[i] = data_start + i * hi->slot_size;
		hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i];
		dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
			i, hi->rx_offsets[i]);
	}
	for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) {
		hi->tx_offsets[i] = data_start +
			(i + hi->mmap_cfg->rx_bufs) * hi->slot_size;
		hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i];
		dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
			i, hi->tx_offsets[i]);
	}

	hi->iface_state = CS_STATE_CONFIGURED;
}

static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state)
{
	if (old_state == CS_STATE_CONFIGURED) {
		dev_dbg(&hi->cl->device,
			"closing data channel with slot size 0\n");
		hi->iface_state = CS_STATE_OPENED;
	}
}

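/*
 * Apply a new buffer configuration (CS_CONFIG_BUFS ioctl). The driver
 * first drops back to CS_STATE_OPENED so no new transfers start, waits
 * for in-flight data transfers to drain, and then installs the new
 * layout under the lock. A non-zero buf_size enables the data path and
 * adds a CPU latency QoS request for the 20ms protocol deadline; a
 * buf_size of zero tears the data path down again.
 */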
static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
					struct cs_buffer_config *buf_cfg)
{
	int r = 0;
	unsigned int old_state = hi->iface_state;

	spin_lock_bh(&hi->lock);
	/* Prevent new transactions during buffer reconfig */
	if (old_state == CS_STATE_CONFIGURED)
		hi->iface_state = CS_STATE_OPENED;
	spin_unlock_bh(&hi->lock);

	/*
	 * make sure that no non-zero data reads are ongoing before
	 * proceeding to change the buffer layout
	 */
	r = cs_hsi_data_sync(hi);
	if (r < 0)
		return r;

	WARN_ON(cs_state_xfer_active(hi->data_state));

	spin_lock_bh(&hi->lock);
	r = check_buf_params(hi, buf_cfg);
	if (r < 0)
		goto error;

	hi->buf_size = buf_cfg->buf_size;
	hi->mmap_cfg->buf_size = hi->buf_size;
	hi->flags = buf_cfg->flags;

	hi->rx_slot = 0;
	hi->tx_slot = 0;
	hi->slot_size = 0;

	if (hi->buf_size)
		cs_hsi_data_enable(hi, buf_cfg);
	else
		cs_hsi_data_disable(hi, old_state);

	spin_unlock_bh(&hi->lock);

	if (old_state != hi->iface_state) {
		if (hi->iface_state == CS_STATE_CONFIGURED) {
			cpu_latency_qos_add_request(&hi->pm_qos_req,
				CS_QOS_LATENCY_FOR_DATA_USEC);
			local_bh_disable();
			cs_hsi_read_on_data(hi);
			local_bh_enable();
		} else if (old_state == CS_STATE_CONFIGURED) {
			cpu_latency_qos_remove_request(&hi->pm_qos_req);
		}
	}
	return r;

error:
	spin_unlock_bh(&hi->lock);
	return r;
}

static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl,
			unsigned long mmap_base, unsigned long mmap_size)
{
	int err = 0;
	struct cs_hsi_iface *hsi_if = kzalloc(sizeof(*hsi_if), GFP_KERNEL);

	dev_dbg(&cl->device, "cs_hsi_start\n");

	if (!hsi_if) {
		err = -ENOMEM;
		goto leave0;
	}
	spin_lock_init(&hsi_if->lock);
	hsi_if->cl = cl;
	hsi_if->iface_state = CS_STATE_CLOSED;
	hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base;
	hsi_if->mmap_base = mmap_base;
	hsi_if->mmap_size = mmap_size;
	memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg));
	init_waitqueue_head(&hsi_if->datawait);
	err = cs_alloc_cmds(hsi_if);
	if (err < 0) {
		dev_err(&cl->device, "Unable to alloc HSI messages\n");
		goto leave1;
	}
	err = cs_hsi_alloc_data(hsi_if);
	if (err < 0) {
		dev_err(&cl->device, "Unable to alloc HSI messages for data\n");
		goto leave2;
	}
	err = hsi_claim_port(cl, 1);
	if (err < 0) {
		dev_err(&cl->device,
				"Could not open, HSI port already claimed\n");
		goto leave3;
	}
	hsi_if->master = ssip_slave_get_master(cl);
	if (IS_ERR(hsi_if->master)) {
		err = PTR_ERR(hsi_if->master);
		dev_err(&cl->device, "Could not get HSI master client\n");
		goto leave4;
	}
	if (!ssip_slave_running(hsi_if->master)) {
		err = -ENODEV;
		dev_err(&cl->device,
				"HSI port not initialized\n");
		goto leave4;
	}

	hsi_if->iface_state = CS_STATE_OPENED;
	local_bh_disable();
	cs_hsi_read_on_control(hsi_if);
	local_bh_enable();

	dev_dbg(&cl->device, "cs_hsi_start...done\n");

	BUG_ON(!hi);
	*hi = hsi_if;

	return 0;

leave4:
	hsi_release_port(cl);
leave3:
	cs_hsi_free_data(hsi_if);
leave2:
	cs_free_cmds(hsi_if);
leave1:
	kfree(hsi_if);
leave0:
	dev_dbg(&cl->device, "cs_hsi_start...done/error\n\n");

	return err;
}

static void cs_hsi_stop(struct cs_hsi_iface *hi)
{
	dev_dbg(&hi->cl->device, "cs_hsi_stop\n");
	cs_hsi_set_wakeline(hi, 0);
	ssip_slave_put_master(hi->master);

	/* hsi_release_port() needs to be called with CS_STATE_CLOSED */
	hi->iface_state = CS_STATE_CLOSED;
	hsi_release_port(hi->cl);

	/*
	 * hsi_release_port() should flush out all the pending
	 * messages, so cs_state_idle() should be true for both
	 * control and data channels.
	 */
	WARN_ON(!cs_state_idle(hi->control_state));
	WARN_ON(!cs_state_idle(hi->data_state));

	if (cpu_latency_qos_request_active(&hi->pm_qos_req))
		cpu_latency_qos_remove_request(&hi->pm_qos_req);

	spin_lock_bh(&hi->lock);
	cs_hsi_free_data(hi);
	cs_free_cmds(hi);
	spin_unlock_bh(&hi->lock);
	kfree(hi);
}

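/*
 * Fault handler for the character device mmap: the whole mapping is
 * backed by the single zeroed page allocated in cs_char_open()
 * (CS_MMAP_SIZE == PAGE_SIZE), so every fault resolves to that page.
 */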
static vm_fault_t cs_char_vma_fault(struct vm_fault *vmf)
{
	struct cs_char *csdata = vmf->vma->vm_private_data;
	struct page *page;

	page = virt_to_page(csdata->mmap_base);
	get_page(page);
	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct cs_char_vm_ops = {
	.fault	= cs_char_vma_fault,
};

static int cs_char_fasync(int fd, struct file *file, int on)
{
	struct cs_char *csdata = file->private_data;

	if (fasync_helper(fd, file, on, &csdata->async_queue) < 0)
		return -EIO;

	return 0;
}

static __poll_t cs_char_poll(struct file *file, poll_table *wait)
{
	struct cs_char *csdata = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &cs_char_data.wait, wait);
	spin_lock_bh(&csdata->lock);
	if (!list_empty(&csdata->chardev_queue))
		ret = EPOLLIN | EPOLLRDNORM;
	else if (!list_empty(&csdata->dataind_queue))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock_bh(&csdata->lock);

	return ret;
}

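/*
 * read() hands userspace one 32-bit event word per call. Control
 * notifications (chardev_queue) take priority over data indications
 * (dataind_queue); if nothing is pending, the caller blocks unless
 * O_NONBLOCK is set.
 */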
cs_char_read(struct file * file,char __user * buf,size_t count,loff_t * unused)1129*4882a593Smuzhiyun static ssize_t cs_char_read(struct file *file, char __user *buf, size_t count,
1130*4882a593Smuzhiyun loff_t *unused)
1131*4882a593Smuzhiyun {
1132*4882a593Smuzhiyun struct cs_char *csdata = file->private_data;
1133*4882a593Smuzhiyun u32 data;
1134*4882a593Smuzhiyun ssize_t retval;
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun if (count < sizeof(data))
1137*4882a593Smuzhiyun return -EINVAL;
1138*4882a593Smuzhiyun
1139*4882a593Smuzhiyun for (;;) {
1140*4882a593Smuzhiyun DEFINE_WAIT(wait);
1141*4882a593Smuzhiyun
1142*4882a593Smuzhiyun spin_lock_bh(&csdata->lock);
1143*4882a593Smuzhiyun if (!list_empty(&csdata->chardev_queue)) {
1144*4882a593Smuzhiyun data = cs_pop_entry(&csdata->chardev_queue);
1145*4882a593Smuzhiyun } else if (!list_empty(&csdata->dataind_queue)) {
1146*4882a593Smuzhiyun data = cs_pop_entry(&csdata->dataind_queue);
1147*4882a593Smuzhiyun csdata->dataind_pending--;
1148*4882a593Smuzhiyun } else {
1149*4882a593Smuzhiyun data = 0;
1150*4882a593Smuzhiyun }
1151*4882a593Smuzhiyun spin_unlock_bh(&csdata->lock);
1152*4882a593Smuzhiyun
1153*4882a593Smuzhiyun if (data)
1154*4882a593Smuzhiyun break;
1155*4882a593Smuzhiyun if (file->f_flags & O_NONBLOCK) {
1156*4882a593Smuzhiyun retval = -EAGAIN;
1157*4882a593Smuzhiyun goto out;
1158*4882a593Smuzhiyun } else if (signal_pending(current)) {
1159*4882a593Smuzhiyun retval = -ERESTARTSYS;
1160*4882a593Smuzhiyun goto out;
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun prepare_to_wait_exclusive(&csdata->wait, &wait,
1163*4882a593Smuzhiyun TASK_INTERRUPTIBLE);
1164*4882a593Smuzhiyun schedule();
1165*4882a593Smuzhiyun finish_wait(&csdata->wait, &wait);
1166*4882a593Smuzhiyun }
1167*4882a593Smuzhiyun
1168*4882a593Smuzhiyun retval = put_user(data, (u32 __user *)buf);
1169*4882a593Smuzhiyun if (!retval)
1170*4882a593Smuzhiyun retval = sizeof(data);
1171*4882a593Smuzhiyun
1172*4882a593Smuzhiyun out:
1173*4882a593Smuzhiyun return retval;
1174*4882a593Smuzhiyun }
1175*4882a593Smuzhiyun
cs_char_write(struct file * file,const char __user * buf,size_t count,loff_t * unused)1176*4882a593Smuzhiyun static ssize_t cs_char_write(struct file *file, const char __user *buf,
1177*4882a593Smuzhiyun size_t count, loff_t *unused)
1178*4882a593Smuzhiyun {
1179*4882a593Smuzhiyun struct cs_char *csdata = file->private_data;
1180*4882a593Smuzhiyun u32 data;
1181*4882a593Smuzhiyun int err;
1182*4882a593Smuzhiyun ssize_t retval;
1183*4882a593Smuzhiyun
1184*4882a593Smuzhiyun if (count < sizeof(data))
1185*4882a593Smuzhiyun return -EINVAL;
1186*4882a593Smuzhiyun
1187*4882a593Smuzhiyun if (get_user(data, (u32 __user *)buf))
1188*4882a593Smuzhiyun retval = -EFAULT;
1189*4882a593Smuzhiyun else
1190*4882a593Smuzhiyun retval = count;
1191*4882a593Smuzhiyun
1192*4882a593Smuzhiyun err = cs_hsi_command(csdata->hi, data);
1193*4882a593Smuzhiyun if (err < 0)
1194*4882a593Smuzhiyun retval = err;
1195*4882a593Smuzhiyun
1196*4882a593Smuzhiyun return retval;
1197*4882a593Smuzhiyun }

static long cs_char_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct cs_char *csdata = file->private_data;
	int r = 0;

	switch (cmd) {
	case CS_GET_STATE: {
		unsigned int state;

		state = cs_hsi_get_state(csdata->hi);
		if (copy_to_user((void __user *)arg, &state, sizeof(state)))
			r = -EFAULT;

		break;
	}
	case CS_SET_WAKELINE: {
		unsigned int state;

		if (copy_from_user(&state, (void __user *)arg, sizeof(state))) {
			r = -EFAULT;
			break;
		}

		if (state > 1) {
			r = -EINVAL;
			break;
		}

		cs_hsi_set_wakeline(csdata->hi, !!state);

		break;
	}
	case CS_GET_IF_VERSION: {
		unsigned int ifver = CS_IF_VERSION;

		if (copy_to_user((void __user *)arg, &ifver, sizeof(ifver)))
			r = -EFAULT;

		break;
	}
	case CS_CONFIG_BUFS: {
		struct cs_buffer_config buf_cfg;

		if (copy_from_user(&buf_cfg, (void __user *)arg,
							sizeof(buf_cfg)))
			r = -EFAULT;
		else
			r = cs_hsi_buf_config(csdata->hi, &buf_cfg);

		break;
	}
	default:
		r = -ENOTTY;
		break;
	}

	return r;
}
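
/*
 * Example (userspace side, illustrative sketch only): a plausible setup
 * sequence is to check the interface version and then raise the
 * wakeline; this exact ordering is an assumption, not mandated here.
 *
 *	unsigned int ifver, on = 1;
 *	ioctl(fd, CS_GET_IF_VERSION, &ifver);
 *	ioctl(fd, CS_SET_WAKELINE, &on);
 */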

static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;

	if (vma_pages(vma) != 1)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
	vma->vm_ops = &cs_char_vm_ops;
	vma->vm_private_data = file->private_data;

	return 0;
}
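
/*
 * Example (userspace side, illustrative sketch only): the shared buffer
 * area is exactly one page, so any other mapping size is rejected above.
 *
 *	void *base = mmap(NULL, sysconf(_SC_PAGESIZE),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */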

static int cs_char_open(struct inode *unused, struct file *file)
{
	int ret = 0;
	unsigned long p;

	spin_lock_bh(&cs_char_data.lock);
	if (cs_char_data.opened) {
		ret = -EBUSY;
		spin_unlock_bh(&cs_char_data.lock);
		goto out1;
	}
	cs_char_data.opened = 1;
	cs_char_data.dataind_pending = 0;
	spin_unlock_bh(&cs_char_data.lock);

	p = get_zeroed_page(GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out2;
	}

	ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE);
	if (ret) {
		dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n");
		goto out3;
	}

	/* these are only used in release so lock not needed */
	cs_char_data.mmap_base = p;
	cs_char_data.mmap_size = CS_MMAP_SIZE;

	file->private_data = &cs_char_data;

	return 0;

out3:
	free_page(p);
out2:
	spin_lock_bh(&cs_char_data.lock);
	cs_char_data.opened = 0;
	spin_unlock_bh(&cs_char_data.lock);
out1:
	return ret;
}
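
/*
 * Note: the device is single-open; a concurrent open() fails with
 * -EBUSY until the first file descriptor has been released.
 */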

static void cs_free_char_queue(struct list_head *head)
{
	struct char_queue *entry;
	struct list_head *cursor, *next;

	/* list_for_each_safe() is a no-op on an empty list. */
	list_for_each_safe(cursor, next, head) {
		entry = list_entry(cursor, struct char_queue, list);
		list_del(&entry->list);
		kfree(entry);
	}
}
cs_char_release(struct inode * unused,struct file * file)1334*4882a593Smuzhiyun static int cs_char_release(struct inode *unused, struct file *file)
1335*4882a593Smuzhiyun {
1336*4882a593Smuzhiyun struct cs_char *csdata = file->private_data;
1337*4882a593Smuzhiyun
1338*4882a593Smuzhiyun cs_hsi_stop(csdata->hi);
1339*4882a593Smuzhiyun spin_lock_bh(&csdata->lock);
1340*4882a593Smuzhiyun csdata->hi = NULL;
1341*4882a593Smuzhiyun free_page(csdata->mmap_base);
1342*4882a593Smuzhiyun cs_free_char_queue(&csdata->chardev_queue);
1343*4882a593Smuzhiyun cs_free_char_queue(&csdata->dataind_queue);
1344*4882a593Smuzhiyun csdata->opened = 0;
1345*4882a593Smuzhiyun spin_unlock_bh(&csdata->lock);
1346*4882a593Smuzhiyun
1347*4882a593Smuzhiyun return 0;
1348*4882a593Smuzhiyun }

static const struct file_operations cs_char_fops = {
	.owner		= THIS_MODULE,
	.read		= cs_char_read,
	.write		= cs_char_write,
	.poll		= cs_char_poll,
	.unlocked_ioctl	= cs_char_ioctl,
	.mmap		= cs_char_mmap,
	.open		= cs_char_open,
	.release	= cs_char_release,
	.fasync		= cs_char_fasync,
};

static struct miscdevice cs_char_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "cmt_speech",
	.fops	= &cs_char_fops
};
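
/*
 * With MISC_DYNAMIC_MINOR the misc core assigns a free minor at
 * registration time; the device node typically shows up as
 * /dev/cmt_speech (created by devtmpfs/udev from .name above).
 */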

static int cs_hsi_client_probe(struct device *dev)
{
	int err = 0;
	struct hsi_client *cl = to_hsi_client(dev);

	dev_dbg(dev, "hsi_client_probe\n");
	init_waitqueue_head(&cs_char_data.wait);
	spin_lock_init(&cs_char_data.lock);
	cs_char_data.opened = 0;
	cs_char_data.cl = cl;
	cs_char_data.hi = NULL;
	INIT_LIST_HEAD(&cs_char_data.chardev_queue);
	INIT_LIST_HEAD(&cs_char_data.dataind_queue);

	cs_char_data.channel_id_cmd = hsi_get_channel_id_by_name(cl,
		"speech-control");
	if (cs_char_data.channel_id_cmd < 0) {
		err = cs_char_data.channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		return err;
	}

	cs_char_data.channel_id_data = hsi_get_channel_id_by_name(cl,
		"speech-data");
	if (cs_char_data.channel_id_data < 0) {
		err = cs_char_data.channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		return err;
	}

	err = misc_register(&cs_char_miscdev);
	if (err)
		dev_err(dev, "Failed to register: %d\n", err);

	return err;
}

static int cs_hsi_client_remove(struct device *dev)
{
	struct cs_hsi_iface *hi;

	dev_dbg(dev, "hsi_client_remove\n");
	misc_deregister(&cs_char_miscdev);
	spin_lock_bh(&cs_char_data.lock);
	hi = cs_char_data.hi;
	cs_char_data.hi = NULL;
	spin_unlock_bh(&cs_char_data.lock);
	if (hi)
		cs_hsi_stop(hi);

	return 0;
}

static struct hsi_client_driver cs_hsi_driver = {
	.driver = {
		.name	= "cmt-speech",
		.owner	= THIS_MODULE,
		.probe	= cs_hsi_client_probe,
		.remove	= cs_hsi_client_remove,
	},
};

static int __init cs_char_init(void)
{
	pr_info("CMT speech driver added\n");
	return hsi_register_client_driver(&cs_hsi_driver);
}
module_init(cs_char_init);

static void __exit cs_char_exit(void)
{
	hsi_unregister_client_driver(&cs_hsi_driver);
	pr_info("CMT speech driver removed\n");
}
module_exit(cs_char_exit);

MODULE_ALIAS("hsi:cmt-speech");
MODULE_AUTHOR("Kai Vehmanen <kai.vehmanen@nokia.com>");
MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@nokia.com>");
MODULE_DESCRIPTION("CMT speech driver");
MODULE_LICENSE("GPL v2");