// SPDX-License-Identifier: GPL-2.0
/*
 * For transport using shared mem structure.
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/io.h>
#include <linux/processor.h>
#include <linux/types.h>

#include "common.h"

/*
 * The SCMI specification requires all parameters, message headers, return
 * arguments and any protocol data to be expressed in little-endian
 * format only.
 */
/*
 * Memory layout of an SCMI shared-memory transfer area, as mandated by the
 * SCMI specification. Accessed only through the __iomem accessors below;
 * the layout must not be changed.
 */
struct scmi_shared_mem {
	__le32 reserved;
	/* Channel ownership/status word, polled and written by both sides */
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	/* Transfer flags; only the completion-interrupt enable is defined */
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	/* Byte length of msg_header + msg_payload written by the sender */
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[];
};
31*4882a593Smuzhiyun
/*
 * shmem_tx_prepare() - Lay out an outgoing SCMI message in the shared area.
 * @shmem: iomem pointer to the shared-memory transfer area
 * @xfer:  transfer descriptor holding the header and TX payload
 *
 * Waits for the platform to release the channel, then claims it and writes
 * flags, length, header and payload in that order. The write ordering
 * matters: the channel must be marked busy before any other field is
 * touched, and the payload is copied last.
 */
void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
		      struct scmi_xfer *xfer)
{
	/*
	 * Ideally channel must be free by now unless OS timeout last
	 * request and platform continued to process the same, wait
	 * until it releases the shared memory, otherwise we may endup
	 * overwriting its response with new message payload or vice-versa
	 */
	spin_until_cond(ioread32(&shmem->channel_status) &
			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
	/* Mark channel busy + clear error */
	iowrite32(0x0, &shmem->channel_status);
	/* Request a completion interrupt unless the caller intends to poll */
	iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &shmem->flags);
	iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
	iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
	if (xfer->tx.buf)
		memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}
52*4882a593Smuzhiyun
shmem_read_header(struct scmi_shared_mem __iomem * shmem)53*4882a593Smuzhiyun u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun return ioread32(&shmem->msg_header);
56*4882a593Smuzhiyun }
57*4882a593Smuzhiyun
/*
 * shmem_fetch_response() - Copy a command response out of the shared area.
 * @shmem: iomem pointer to the shared-memory transfer area
 * @xfer:  transfer descriptor; hdr.status and rx.buf/rx.len are updated
 *
 * The first payload word is the SCMI status code; the remainder is the
 * response data. The reported length is not trusted: a misbehaving
 * platform could report a value smaller than the 8 bytes of header +
 * status, and the unguarded subtraction would underflow size_t to a huge
 * count. Clamp to zero in that case.
 */
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
			  struct scmi_xfer *xfer)
{
	size_t len = ioread32(&shmem->length);

	xfer->hdr.status = ioread32(shmem->msg_payload);
	/* Skip the length of header and status in shmem area i.e 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);

	/* Take a copy to the rx buffer.. */
	memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
69*4882a593Smuzhiyun
/*
 * shmem_fetch_notification() - Copy a notification payload out of the
 * shared area.
 * @shmem:   iomem pointer to the shared-memory transfer area
 * @max_len: capacity of the caller's rx buffer
 * @xfer:    transfer descriptor; rx.buf/rx.len are updated
 *
 * Notifications carry no status word, so only the 4-byte header is
 * skipped. As in shmem_fetch_response(), guard against a platform
 * reporting a length smaller than the header, which would underflow the
 * subtraction.
 */
void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
			      size_t max_len, struct scmi_xfer *xfer)
{
	size_t len = ioread32(&shmem->length);

	/* Skip only the length of header in shmem area i.e 4 bytes */
	xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);

	/* Take a copy to the rx buffer.. */
	memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}
79*4882a593Smuzhiyun
/*
 * shmem_clear_channel() - Hand the channel back to the platform by marking
 * it free; writing only the FREE bit also clears any error flag.
 */
void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
{
	u32 status = SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE;

	iowrite32(status, &shmem->channel_status);
}
84*4882a593Smuzhiyun
shmem_poll_done(struct scmi_shared_mem __iomem * shmem,struct scmi_xfer * xfer)85*4882a593Smuzhiyun bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
86*4882a593Smuzhiyun struct scmi_xfer *xfer)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun u16 xfer_id;
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun if (xfer->hdr.seq != xfer_id)
93*4882a593Smuzhiyun return false;
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun return ioread32(&shmem->channel_status) &
96*4882a593Smuzhiyun (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
97*4882a593Smuzhiyun SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
98*4882a593Smuzhiyun }
99