1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
4*4882a593Smuzhiyun * Intel Management Engine Interface (Intel MEI) Linux driver
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include <linux/export.h>
8*4882a593Smuzhiyun #include <linux/kthread.h>
9*4882a593Smuzhiyun #include <linux/interrupt.h>
10*4882a593Smuzhiyun #include <linux/fs.h>
11*4882a593Smuzhiyun #include <linux/jiffies.h>
12*4882a593Smuzhiyun #include <linux/slab.h>
13*4882a593Smuzhiyun #include <linux/pm_runtime.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #include <linux/mei.h>
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #include "mei_dev.h"
18*4882a593Smuzhiyun #include "hbm.h"
19*4882a593Smuzhiyun #include "client.h"
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun /**
23*4882a593Smuzhiyun * mei_irq_compl_handler - dispatch complete handlers
24*4882a593Smuzhiyun * for the completed callbacks
25*4882a593Smuzhiyun *
26*4882a593Smuzhiyun * @dev: mei device
27*4882a593Smuzhiyun * @cmpl_list: list of completed cbs
28*4882a593Smuzhiyun */
mei_irq_compl_handler(struct mei_device * dev,struct list_head * cmpl_list)29*4882a593Smuzhiyun void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list)
30*4882a593Smuzhiyun {
31*4882a593Smuzhiyun struct mei_cl_cb *cb, *next;
32*4882a593Smuzhiyun struct mei_cl *cl;
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun list_for_each_entry_safe(cb, next, cmpl_list, list) {
35*4882a593Smuzhiyun cl = cb->cl;
36*4882a593Smuzhiyun list_del_init(&cb->list);
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun dev_dbg(dev->dev, "completing call back.\n");
39*4882a593Smuzhiyun mei_cl_complete(cl, cb);
40*4882a593Smuzhiyun }
41*4882a593Smuzhiyun }
42*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mei_irq_compl_handler);
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun /**
45*4882a593Smuzhiyun * mei_cl_hbm_equal - check if hbm is addressed to the client
46*4882a593Smuzhiyun *
47*4882a593Smuzhiyun * @cl: host client
48*4882a593Smuzhiyun * @mei_hdr: header of mei client message
49*4882a593Smuzhiyun *
50*4882a593Smuzhiyun * Return: true if matches, false otherwise
51*4882a593Smuzhiyun */
mei_cl_hbm_equal(struct mei_cl * cl,struct mei_msg_hdr * mei_hdr)52*4882a593Smuzhiyun static inline int mei_cl_hbm_equal(struct mei_cl *cl,
53*4882a593Smuzhiyun struct mei_msg_hdr *mei_hdr)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun return mei_cl_host_addr(cl) == mei_hdr->host_addr &&
56*4882a593Smuzhiyun mei_cl_me_id(cl) == mei_hdr->me_addr;
57*4882a593Smuzhiyun }
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun /**
60*4882a593Smuzhiyun * mei_irq_discard_msg - discard received message
61*4882a593Smuzhiyun *
62*4882a593Smuzhiyun * @dev: mei device
63*4882a593Smuzhiyun * @hdr: message header
64*4882a593Smuzhiyun * @discard_len: the length of the message to discard (excluding header)
65*4882a593Smuzhiyun */
static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr,
				size_t discard_len)
{
	size_t slots_len = discard_len;

	if (hdr->dma_ring) {
		/* payload sits in the DMA ring: drain it from there and
		 * consume nothing from the message registers
		 */
		mei_dma_ring_read(dev, NULL,
				  hdr->extension[dev->rd_msg_hdr_count - 2]);
		slots_len = 0;
	}
	/*
	 * no need to check for size as it is guarantied
	 * that length fits into rd_msg_buf
	 */
	mei_read_slots(dev, dev->rd_msg_buf, slots_len);
	dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
		MEI_HDR_PRM(hdr));
}
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun /**
84*4882a593Smuzhiyun * mei_cl_irq_read_msg - process client message
85*4882a593Smuzhiyun *
86*4882a593Smuzhiyun * @cl: reading client
87*4882a593Smuzhiyun * @mei_hdr: header of mei client message
88*4882a593Smuzhiyun * @meta: extend meta header
89*4882a593Smuzhiyun * @cmpl_list: completion list
90*4882a593Smuzhiyun *
91*4882a593Smuzhiyun * Return: always 0
92*4882a593Smuzhiyun */
static int mei_cl_irq_read_msg(struct mei_cl *cl,
			       struct mei_msg_hdr *mei_hdr,
			       struct mei_ext_meta_hdr *meta,
			       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	size_t buf_sz;
	u32 length;
	int ext_len;

	length = mei_hdr->length;
	ext_len = 0;
	if (mei_hdr->extended) {
		/* the payload length excludes the extended header area */
		ext_len = sizeof(*meta) + mei_slots2data(meta->size);
		length -= ext_len;
	}

	/* reads are completed in order against the oldest pending cb */
	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	if (!cb) {
		if (!mei_cl_is_fixed_address(cl)) {
			cl_err(dev, cl, "pending read cb not found\n");
			goto discard;
		}
		/* fixed address clients may receive unsolicited messages:
		 * allocate an implicit read cb on the fly
		 */
		cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
		if (!cb)
			goto discard;
		list_add_tail(&cb->list, &cl->rd_pending);
	}

	if (mei_hdr->extended) {
		struct mei_ext_hdr *ext;
		struct mei_ext_hdr *vtag = NULL;

		/* walk all extended headers; only vtag is understood,
		 * anything else marks the cb with a protocol error
		 */
		ext = mei_ext_begin(meta);
		do {
			switch (ext->type) {
			case MEI_EXT_HDR_VTAG:
				vtag = ext;
				break;
			case MEI_EXT_HDR_NONE:
				fallthrough;
			default:
				cb->status = -EPROTO;
				break;
			}

			ext = mei_ext_next(ext);
		} while (!mei_ext_last(meta, ext));

		if (!vtag) {
			cl_dbg(dev, cl, "vtag not found in extended header.\n");
			cb->status = -EPROTO;
			goto discard;
		}

		cl_dbg(dev, cl, "vtag: %d\n", vtag->ext_payload[0]);
		/* a continuation fragment must carry the vtag of the
		 * first fragment (cb->vtag == 0 means not yet set)
		 */
		if (cb->vtag && cb->vtag != vtag->ext_payload[0]) {
			cl_err(dev, cl, "mismatched tag: %d != %d\n",
			       cb->vtag, vtag->ext_payload[0]);
			cb->status = -EPROTO;
			goto discard;
		}
		cb->vtag = vtag->ext_payload[0];
	}

	if (!mei_cl_is_connected(cl)) {
		cl_dbg(dev, cl, "not connected\n");
		cb->status = -ENODEV;
		goto discard;
	}

	/* for DMA transfers the real payload length is carried in the
	 * header extension slot following any extended headers
	 */
	if (mei_hdr->dma_ring)
		length = mei_hdr->extension[mei_data2slots(ext_len)];

	buf_sz = length + cb->buf_idx;
	/* catch for integer overflow */
	if (buf_sz < cb->buf_idx) {
		cl_err(dev, cl, "message is too big len %d idx %zu\n",
		       length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	if (cb->buf.size < buf_sz) {
		cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
		       cb->buf.size, length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	if (mei_hdr->dma_ring) {
		mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length);
		/* for DMA read 0 length to generate interrupt to the device */
		mei_read_slots(dev, cb->buf.data + cb->buf_idx, 0);
	} else {
		mei_read_slots(dev, cb->buf.data + cb->buf_idx, length);
	}

	cb->buf_idx += length;

	if (mei_hdr->msg_complete) {
		cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
		list_move_tail(&cb->list, cmpl_list);
	} else {
		/* more fragments to come: keep the device awake */
		pm_runtime_mark_last_busy(dev->dev);
		pm_request_autosuspend(dev->dev);
	}

	return 0;

discard:
	/* complete the cb (with the error recorded in cb->status, if any)
	 * and drain the message from the hardware
	 */
	if (cb)
		list_move_tail(&cb->list, cmpl_list);
	mei_irq_discard_msg(dev, mei_hdr, length);
	return 0;
}
211*4882a593Smuzhiyun
212*4882a593Smuzhiyun /**
213*4882a593Smuzhiyun * mei_cl_irq_disconnect_rsp - send disconnection response message
214*4882a593Smuzhiyun *
215*4882a593Smuzhiyun * @cl: client
216*4882a593Smuzhiyun * @cb: callback block.
217*4882a593Smuzhiyun * @cmpl_list: complete list.
218*4882a593Smuzhiyun *
219*4882a593Smuzhiyun * Return: 0, OK; otherwise, error.
220*4882a593Smuzhiyun */
mei_cl_irq_disconnect_rsp(struct mei_cl * cl,struct mei_cl_cb * cb,struct list_head * cmpl_list)221*4882a593Smuzhiyun static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
222*4882a593Smuzhiyun struct list_head *cmpl_list)
223*4882a593Smuzhiyun {
224*4882a593Smuzhiyun struct mei_device *dev = cl->dev;
225*4882a593Smuzhiyun u32 msg_slots;
226*4882a593Smuzhiyun int slots;
227*4882a593Smuzhiyun int ret;
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_response));
230*4882a593Smuzhiyun slots = mei_hbuf_empty_slots(dev);
231*4882a593Smuzhiyun if (slots < 0)
232*4882a593Smuzhiyun return -EOVERFLOW;
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun if ((u32)slots < msg_slots)
235*4882a593Smuzhiyun return -EMSGSIZE;
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun ret = mei_hbm_cl_disconnect_rsp(dev, cl);
238*4882a593Smuzhiyun list_move_tail(&cb->list, cmpl_list);
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun return ret;
241*4882a593Smuzhiyun }
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun /**
244*4882a593Smuzhiyun * mei_cl_irq_read - processes client read related operation from the
245*4882a593Smuzhiyun * interrupt thread context - request for flow control credits
246*4882a593Smuzhiyun *
247*4882a593Smuzhiyun * @cl: client
248*4882a593Smuzhiyun * @cb: callback block.
249*4882a593Smuzhiyun * @cmpl_list: complete list.
250*4882a593Smuzhiyun *
251*4882a593Smuzhiyun * Return: 0, OK; otherwise, error.
252*4882a593Smuzhiyun */
mei_cl_irq_read(struct mei_cl * cl,struct mei_cl_cb * cb,struct list_head * cmpl_list)253*4882a593Smuzhiyun static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
254*4882a593Smuzhiyun struct list_head *cmpl_list)
255*4882a593Smuzhiyun {
256*4882a593Smuzhiyun struct mei_device *dev = cl->dev;
257*4882a593Smuzhiyun u32 msg_slots;
258*4882a593Smuzhiyun int slots;
259*4882a593Smuzhiyun int ret;
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun if (!list_empty(&cl->rd_pending))
262*4882a593Smuzhiyun return 0;
263*4882a593Smuzhiyun
264*4882a593Smuzhiyun msg_slots = mei_hbm2slots(sizeof(struct hbm_flow_control));
265*4882a593Smuzhiyun slots = mei_hbuf_empty_slots(dev);
266*4882a593Smuzhiyun if (slots < 0)
267*4882a593Smuzhiyun return -EOVERFLOW;
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun if ((u32)slots < msg_slots)
270*4882a593Smuzhiyun return -EMSGSIZE;
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun ret = mei_hbm_cl_flow_control_req(dev, cl);
273*4882a593Smuzhiyun if (ret) {
274*4882a593Smuzhiyun cl->status = ret;
275*4882a593Smuzhiyun cb->buf_idx = 0;
276*4882a593Smuzhiyun list_move_tail(&cb->list, cmpl_list);
277*4882a593Smuzhiyun return ret;
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun pm_runtime_mark_last_busy(dev->dev);
281*4882a593Smuzhiyun pm_request_autosuspend(dev->dev);
282*4882a593Smuzhiyun
283*4882a593Smuzhiyun list_move_tail(&cb->list, &cl->rd_pending);
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun return 0;
286*4882a593Smuzhiyun }
287*4882a593Smuzhiyun
hdr_is_hbm(struct mei_msg_hdr * mei_hdr)288*4882a593Smuzhiyun static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
289*4882a593Smuzhiyun {
290*4882a593Smuzhiyun return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun
hdr_is_fixed(struct mei_msg_hdr * mei_hdr)293*4882a593Smuzhiyun static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
294*4882a593Smuzhiyun {
295*4882a593Smuzhiyun return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun
static inline int hdr_is_valid(u32 msg_hdr)
{
	struct mei_msg_hdr *mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
	u32 minimal_len = 0;

	/* an all-zero header or any reserved bit set is malformed */
	if (!msg_hdr || mei_hdr->reserved)
		return -EBADMSG;

	/* each optional feature consumes one extra slot of the payload */
	if (mei_hdr->dma_ring)
		minimal_len += MEI_SLOT_SIZE;
	if (mei_hdr->extended)
		minimal_len += MEI_SLOT_SIZE;

	return mei_hdr->length < minimal_len ? -EBADMSG : 0;
}
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun /**
318*4882a593Smuzhiyun * mei_irq_read_handler - bottom half read routine after ISR to
319*4882a593Smuzhiyun * handle the read processing.
320*4882a593Smuzhiyun *
321*4882a593Smuzhiyun * @dev: the device structure
322*4882a593Smuzhiyun * @cmpl_list: An instance of our list structure
323*4882a593Smuzhiyun * @slots: slots to read.
324*4882a593Smuzhiyun *
325*4882a593Smuzhiyun * Return: 0 on success, <0 on failure.
326*4882a593Smuzhiyun */
int mei_irq_read_handler(struct mei_device *dev,
			 struct list_head *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_ext_meta_hdr *meta_hdr = NULL;
	struct mei_cl *cl;
	int ret;
	u32 ext_meta_hdr_u32;
	u32 hdr_size_left;
	u32 hdr_size_ext;
	int i;
	int ext_hdr_end;

	/* rd_msg_hdr[0] persists across invocations: a non-zero value means
	 * the header was already fetched on a previous (partial) pass
	 */
	if (!dev->rd_msg_hdr[0]) {
		dev->rd_msg_hdr[0] = mei_read_hdr(dev);
		dev->rd_msg_hdr_count = 1;
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);

		ret = hdr_is_valid(dev->rd_msg_hdr[0]);
		if (ret) {
			dev_err(dev->dev, "corrupted message header 0x%08X\n",
				dev->rd_msg_hdr[0]);
			goto end;
		}
	}

	mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	/* keep the header and wait for the rest of the payload */
	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
			*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	ext_hdr_end = 1;
	hdr_size_left = mei_hdr->length;

	if (mei_hdr->extended) {
		/* fetch the extended meta header (one slot) unless it was
		 * already read on a previous pass
		 */
		if (!dev->rd_msg_hdr[1]) {
			ext_meta_hdr_u32 = mei_read_hdr(dev);
			dev->rd_msg_hdr[1] = ext_meta_hdr_u32;
			dev->rd_msg_hdr_count++;
			(*slots)--;
			dev_dbg(dev->dev, "extended header is %08x\n",
				ext_meta_hdr_u32);
		}
		/* NOTE(review): '+ 1' is in units of sizeof(*meta_hdr); this
		 * lands on rd_msg_hdr[1] only because the meta header is one
		 * slot (4 bytes) — confirm against struct mei_ext_meta_hdr
		 */
		meta_hdr = ((struct mei_ext_meta_hdr *)dev->rd_msg_hdr + 1);
		if (check_add_overflow((u32)sizeof(*meta_hdr),
				       mei_slots2data(meta_hdr->size),
				       &hdr_size_ext)) {
			dev_err(dev->dev, "extended message size too big %d\n",
				meta_hdr->size);
			return -EBADMSG;
		}
		/* the declared length must cover all extended headers */
		if (hdr_size_left < hdr_size_ext) {
			dev_err(dev->dev, "corrupted message header len %d\n",
				mei_hdr->length);
			return -EBADMSG;
		}
		hdr_size_left -= hdr_size_ext;

		/* slots 2 .. (size + 1) hold the extended headers themselves */
		ext_hdr_end = meta_hdr->size + 2;
		for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) {
			dev->rd_msg_hdr[i] = mei_read_hdr(dev);
			dev_dbg(dev->dev, "extended header %d is %08x\n", i,
				dev->rd_msg_hdr[i]);
			dev->rd_msg_hdr_count++;
			(*slots)--;
		}
	}

	if (mei_hdr->dma_ring) {
		/* for DMA messages only the payload-length slot remains */
		if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) {
			dev_err(dev->dev, "corrupted message header len %d\n",
				mei_hdr->length);
			return -EBADMSG;
		}

		dev->rd_msg_hdr[ext_hdr_end] = mei_read_hdr(dev);
		dev->rd_msg_hdr_count++;
		(*slots)--;
		mei_hdr->length -= sizeof(dev->rd_msg_hdr[ext_hdr_end]);
	}

	/* HBM message */
	if (hdr_is_hbm(mei_hdr)) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
				ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list);
			goto reset_slots;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	/* A message for not connected fixed address clients
	 * should be silently discarded
	 * On power down client may be force cleaned,
	 * silently discard such messages
	 */
	if (hdr_is_fixed(mei_hdr) ||
	    dev->dev_state == MEI_DEV_POWER_DOWN) {
		mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length);
		ret = 0;
		goto reset_slots;
	}
	dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]);
	ret = -EBADMSG;
	goto end;

reset_slots:
	/* reset the number of slots and header */
	memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr));
	dev->rd_msg_hdr_count = 0;
	*slots = mei_count_full_read_slots(dev);
	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);
467*4882a593Smuzhiyun
468*4882a593Smuzhiyun
469*4882a593Smuzhiyun /**
470*4882a593Smuzhiyun * mei_irq_write_handler - dispatch write requests
471*4882a593Smuzhiyun * after irq received
472*4882a593Smuzhiyun *
473*4882a593Smuzhiyun * @dev: the device structure
474*4882a593Smuzhiyun * @cmpl_list: An instance of our list structure
475*4882a593Smuzhiyun *
476*4882a593Smuzhiyun * Return: 0 on success, <0 on failure.
477*4882a593Smuzhiyun */
int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
{

	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	s32 slots;
	int ret;


	/* nothing to do if the host buffer is not ours to write */
	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if (slots == 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
		cl = cb->cl;

		cl->status = 0;
		cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
		cl->writing_state = MEI_WRITE_COMPLETE;
		list_move_tail(&cb->list, cmpl_list);
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
		cl = cb->cl;
		/* dispatch by the pending file operation type; each helper
		 * either sends its message or returns an error that aborts
		 * the whole pass (e.g. -EMSGSIZE when slots run out)
		 */
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;

		case MEI_FOP_NOTIFY_START:
		case MEI_FOP_NOTIFY_STOP:
			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			/* a cb on ctrl_wr_list with any other fop type is a
			 * driver logic error, not a recoverable condition
			 */
			BUG();
		}

	}
	/* complete write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list, list) {
		cl = cb->cl;
		ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);
564*4882a593Smuzhiyun
565*4882a593Smuzhiyun
566*4882a593Smuzhiyun /**
567*4882a593Smuzhiyun * mei_connect_timeout - connect/disconnect timeouts
568*4882a593Smuzhiyun *
569*4882a593Smuzhiyun * @cl: host client
570*4882a593Smuzhiyun */
mei_connect_timeout(struct mei_cl * cl)571*4882a593Smuzhiyun static void mei_connect_timeout(struct mei_cl *cl)
572*4882a593Smuzhiyun {
573*4882a593Smuzhiyun struct mei_device *dev = cl->dev;
574*4882a593Smuzhiyun
575*4882a593Smuzhiyun if (cl->state == MEI_FILE_CONNECTING) {
576*4882a593Smuzhiyun if (dev->hbm_f_dot_supported) {
577*4882a593Smuzhiyun cl->state = MEI_FILE_DISCONNECT_REQUIRED;
578*4882a593Smuzhiyun wake_up(&cl->wait);
579*4882a593Smuzhiyun return;
580*4882a593Smuzhiyun }
581*4882a593Smuzhiyun }
582*4882a593Smuzhiyun mei_reset(dev);
583*4882a593Smuzhiyun }
584*4882a593Smuzhiyun
585*4882a593Smuzhiyun #define MEI_STALL_TIMER_FREQ (2 * HZ)
586*4882a593Smuzhiyun /**
587*4882a593Smuzhiyun * mei_schedule_stall_timer - re-arm stall_timer work
588*4882a593Smuzhiyun *
589*4882a593Smuzhiyun * Schedule stall timer
590*4882a593Smuzhiyun *
591*4882a593Smuzhiyun * @dev: the device structure
592*4882a593Smuzhiyun */
void mei_schedule_stall_timer(struct mei_device *dev)
{
	/* (re)arm the stall detector to fire after MEI_STALL_TIMER_FREQ */
	schedule_delayed_work(&dev->timer_work, MEI_STALL_TIMER_FREQ);
}
597*4882a593Smuzhiyun
598*4882a593Smuzhiyun /**
599*4882a593Smuzhiyun * mei_timer - timer function.
600*4882a593Smuzhiyun *
601*4882a593Smuzhiyun * @work: pointer to the work_struct structure
602*4882a593Smuzhiyun *
603*4882a593Smuzhiyun */
void mei_timer(struct work_struct *work)
{
	struct mei_cl *cl;
	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);
	/* re-arm only while at least one countdown is still ticking */
	bool reschedule_timer = false;

	mutex_lock(&dev->device_lock);

	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			/* countdown expired: the handshake stalled, reset */
			if (--dev->init_clients_timer == 0) {
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
			reschedule_timer = true;
		}
	}

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			/* per-client countdown expired: handle the timeout
			 * (may reset the device, hence the early exit)
			 */
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				mei_connect_timeout(cl);
				goto out;
			}
			reschedule_timer = true;
		}
	}

out:
	if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer)
		mei_schedule_stall_timer(dev);

	mutex_unlock(&dev->device_lock);
}
649