xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/marvell/octeontx2/af/mbox.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Marvell OcteonTx2 RVU Admin Function driver
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright (C) 2018 Marvell International Ltd.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or modify
7*4882a593Smuzhiyun  * it under the terms of the GNU General Public License version 2 as
8*4882a593Smuzhiyun  * published by the Free Software Foundation.
9*4882a593Smuzhiyun  */
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/interrupt.h>
13*4882a593Smuzhiyun #include <linux/pci.h>
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include "rvu_reg.h"
16*4882a593Smuzhiyun #include "mbox.h"
17*4882a593Smuzhiyun #include "rvu_trace.h"
18*4882a593Smuzhiyun 
/* Offset of the first message within a mailbox TX/RX region: the region
 * header (struct mbox_hdr) rounded up to the message alignment.
 */
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
20*4882a593Smuzhiyun 
__otx2_mbox_reset(struct otx2_mbox * mbox,int devid)21*4882a593Smuzhiyun void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
22*4882a593Smuzhiyun {
23*4882a593Smuzhiyun 	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
24*4882a593Smuzhiyun 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
25*4882a593Smuzhiyun 	struct mbox_hdr *tx_hdr, *rx_hdr;
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun 	tx_hdr = hw_mbase + mbox->tx_start;
28*4882a593Smuzhiyun 	rx_hdr = hw_mbase + mbox->rx_start;
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun 	mdev->msg_size = 0;
31*4882a593Smuzhiyun 	mdev->rsp_size = 0;
32*4882a593Smuzhiyun 	tx_hdr->num_msgs = 0;
33*4882a593Smuzhiyun 	tx_hdr->msg_size = 0;
34*4882a593Smuzhiyun 	rx_hdr->num_msgs = 0;
35*4882a593Smuzhiyun 	rx_hdr->msg_size = 0;
36*4882a593Smuzhiyun }
37*4882a593Smuzhiyun EXPORT_SYMBOL(__otx2_mbox_reset);
38*4882a593Smuzhiyun 
otx2_mbox_reset(struct otx2_mbox * mbox,int devid)39*4882a593Smuzhiyun void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun 	spin_lock(&mdev->mbox_lock);
44*4882a593Smuzhiyun 	__otx2_mbox_reset(mbox, devid);
45*4882a593Smuzhiyun 	spin_unlock(&mdev->mbox_lock);
46*4882a593Smuzhiyun }
47*4882a593Smuzhiyun EXPORT_SYMBOL(otx2_mbox_reset);
48*4882a593Smuzhiyun 
otx2_mbox_destroy(struct otx2_mbox * mbox)49*4882a593Smuzhiyun void otx2_mbox_destroy(struct otx2_mbox *mbox)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun 	mbox->reg_base = NULL;
52*4882a593Smuzhiyun 	mbox->hwbase = NULL;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	kfree(mbox->dev);
55*4882a593Smuzhiyun 	mbox->dev = NULL;
56*4882a593Smuzhiyun }
57*4882a593Smuzhiyun EXPORT_SYMBOL(otx2_mbox_destroy);
58*4882a593Smuzhiyun 
/* Initialize a mailbox channel.
 *
 * @mbox:      mailbox state to populate
 * @hwbase:    base of the shared mailbox memory (one MBOX_SIZE region
 *             per device)
 * @pdev:      PCI device that owns this mailbox end (used for tracing
 *             and timeout logging)
 * @reg_base:  base of the register space holding the doorbell trigger
 * @direction: MBOX_DIR_* value selecting which end of which channel
 *             this side is
 * @ndevs:     number of remote devices to track
 *
 * Derives the TX/RX window layout and the doorbell trigger register from
 * @direction, allocates one otx2_mbox_dev per remote device and resets
 * each device's mailbox headers.
 *
 * Return: 0 on success, -ENODEV for an unrecognized @direction,
 *         -ENOMEM if the per-device array cannot be allocated.
 */
int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
		   void *reg_base, int direction, int ndevs)
{
	struct otx2_mbox_dev *mdev;
	int devid;

	/* Window layout: the responder side (PFAF/VFPF) uses the
	 * initiator's RX region for TX and vice versa, so the two ends
	 * of a channel agree on where each message stream lives.
	 */
	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_PFVF:
		mbox->tx_start = MBOX_DOWN_TX_START;
		mbox->rx_start = MBOX_DOWN_RX_START;
		mbox->tx_size  = MBOX_DOWN_TX_SIZE;
		mbox->rx_size  = MBOX_DOWN_RX_SIZE;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_VFPF:
		mbox->tx_start = MBOX_DOWN_RX_START;
		mbox->rx_start = MBOX_DOWN_TX_START;
		mbox->tx_size  = MBOX_DOWN_RX_SIZE;
		mbox->rx_size  = MBOX_DOWN_TX_SIZE;
		break;
	case MBOX_DIR_AFPF_UP:
	case MBOX_DIR_PFVF_UP:
		mbox->tx_start = MBOX_UP_TX_START;
		mbox->rx_start = MBOX_UP_RX_START;
		mbox->tx_size  = MBOX_UP_TX_SIZE;
		mbox->rx_size  = MBOX_UP_RX_SIZE;
		break;
	case MBOX_DIR_PFAF_UP:
	case MBOX_DIR_VFPF_UP:
		mbox->tx_start = MBOX_UP_RX_START;
		mbox->rx_start = MBOX_UP_TX_START;
		mbox->tx_size  = MBOX_UP_RX_SIZE;
		mbox->rx_size  = MBOX_UP_TX_SIZE;
		break;
	default:
		return -ENODEV;
	}

	/* Doorbell trigger register and per-devid shift used by
	 * otx2_mbox_msg_send() to notify the peer.
	 */
	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_AFPF_UP:
		mbox->trigger = RVU_AF_AFPF_MBOX0;
		mbox->tr_shift = 4;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_PFAF_UP:
		mbox->trigger = RVU_PF_PFAF_MBOX1;
		mbox->tr_shift = 0;
		break;
	case MBOX_DIR_PFVF:
	case MBOX_DIR_PFVF_UP:
		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
		mbox->tr_shift = 12;
		break;
	case MBOX_DIR_VFPF:
	case MBOX_DIR_VFPF_UP:
		mbox->trigger = RVU_VF_VFPF_MBOX1;
		mbox->tr_shift = 0;
		break;
	default:
		return -ENODEV;
	}

	mbox->reg_base = reg_base;
	mbox->hwbase = hwbase;
	mbox->pdev = pdev;

	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
	if (!mbox->dev) {
		/* Also clears the reg_base/hwbase pointers set above */
		otx2_mbox_destroy(mbox);
		return -ENOMEM;
	}

	mbox->ndevs = ndevs;
	for (devid = 0; devid < ndevs; devid++) {
		mdev = &mbox->dev[devid];
		/* Each device gets its own MBOX_SIZE slice of hwbase */
		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
		spin_lock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		otx2_mbox_reset(mbox, devid);
	}

	return 0;
}
EXPORT_SYMBOL(otx2_mbox_init);
145*4882a593Smuzhiyun 
otx2_mbox_wait_for_rsp(struct otx2_mbox * mbox,int devid)146*4882a593Smuzhiyun int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
147*4882a593Smuzhiyun {
148*4882a593Smuzhiyun 	unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
149*4882a593Smuzhiyun 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
150*4882a593Smuzhiyun 	struct device *sender = &mbox->pdev->dev;
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 	while (!time_after(jiffies, timeout)) {
153*4882a593Smuzhiyun 		if (mdev->num_msgs == mdev->msgs_acked)
154*4882a593Smuzhiyun 			return 0;
155*4882a593Smuzhiyun 		usleep_range(800, 1000);
156*4882a593Smuzhiyun 	}
157*4882a593Smuzhiyun 	dev_dbg(sender, "timed out while waiting for rsp\n");
158*4882a593Smuzhiyun 	return -EIO;
159*4882a593Smuzhiyun }
160*4882a593Smuzhiyun EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
161*4882a593Smuzhiyun 
otx2_mbox_busy_poll_for_rsp(struct otx2_mbox * mbox,int devid)162*4882a593Smuzhiyun int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
163*4882a593Smuzhiyun {
164*4882a593Smuzhiyun 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
165*4882a593Smuzhiyun 	unsigned long timeout = jiffies + 1 * HZ;
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun 	while (!time_after(jiffies, timeout)) {
168*4882a593Smuzhiyun 		if (mdev->num_msgs == mdev->msgs_acked)
169*4882a593Smuzhiyun 			return 0;
170*4882a593Smuzhiyun 		cpu_relax();
171*4882a593Smuzhiyun 	}
172*4882a593Smuzhiyun 	return -EIO;
173*4882a593Smuzhiyun }
174*4882a593Smuzhiyun EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
175*4882a593Smuzhiyun 
otx2_mbox_msg_send(struct otx2_mbox * mbox,int devid)176*4882a593Smuzhiyun void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
177*4882a593Smuzhiyun {
178*4882a593Smuzhiyun 	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
179*4882a593Smuzhiyun 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
180*4882a593Smuzhiyun 	struct mbox_hdr *tx_hdr, *rx_hdr;
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	tx_hdr = hw_mbase + mbox->tx_start;
183*4882a593Smuzhiyun 	rx_hdr = hw_mbase + mbox->rx_start;
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun 	/* If bounce buffer is implemented copy mbox messages from
186*4882a593Smuzhiyun 	 * bounce buffer to hw mbox memory.
187*4882a593Smuzhiyun 	 */
188*4882a593Smuzhiyun 	if (mdev->mbase != hw_mbase)
189*4882a593Smuzhiyun 		memcpy(hw_mbase + mbox->tx_start + msgs_offset,
190*4882a593Smuzhiyun 		       mdev->mbase + mbox->tx_start + msgs_offset,
191*4882a593Smuzhiyun 		       mdev->msg_size);
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	spin_lock(&mdev->mbox_lock);
194*4882a593Smuzhiyun 
195*4882a593Smuzhiyun 	tx_hdr->msg_size = mdev->msg_size;
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 	/* Reset header for next messages */
198*4882a593Smuzhiyun 	mdev->msg_size = 0;
199*4882a593Smuzhiyun 	mdev->rsp_size = 0;
200*4882a593Smuzhiyun 	mdev->msgs_acked = 0;
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun 	/* Sync mbox data into memory */
203*4882a593Smuzhiyun 	smp_wmb();
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	/* num_msgs != 0 signals to the peer that the buffer has a number of
206*4882a593Smuzhiyun 	 * messages.  So this should be written after writing all the messages
207*4882a593Smuzhiyun 	 * to the shared memory.
208*4882a593Smuzhiyun 	 */
209*4882a593Smuzhiyun 	tx_hdr->num_msgs = mdev->num_msgs;
210*4882a593Smuzhiyun 	rx_hdr->num_msgs = 0;
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	spin_unlock(&mdev->mbox_lock);
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun 	/* The interrupt should be fired after num_msgs is written
217*4882a593Smuzhiyun 	 * to the shared memory
218*4882a593Smuzhiyun 	 */
219*4882a593Smuzhiyun 	writeq(1, (void __iomem *)mbox->reg_base +
220*4882a593Smuzhiyun 	       (mbox->trigger | (devid << mbox->tr_shift)));
221*4882a593Smuzhiyun }
222*4882a593Smuzhiyun EXPORT_SYMBOL(otx2_mbox_msg_send);
223*4882a593Smuzhiyun 
/* Reserve space for a request of @size bytes in devid's TX region and
 * account @size_rsp bytes for its eventual response in the RX region.
 *
 * Both sizes are rounded up to MBOX_MSG_ALIGN.  The reserved message is
 * zeroed and its header's ver field preset to OTX2_MBOX_VERSION; the
 * caller fills in the rest.
 *
 * Return: pointer to the new message header, or NULL if either the TX
 *         or RX region lacks room for this request/response pair.
 */
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
					    int size, int size_rsp)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr = NULL;

	spin_lock(&mdev->mbox_lock);
	size = ALIGN(size, MBOX_MSG_ALIGN);
	size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
	/* Check if there is space in mailbox */
	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
		goto exit;
	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
		goto exit;

	/* msg_size == 0 means the batch was flushed by a send/reset;
	 * start counting messages from scratch.
	 */
	if (mdev->msg_size == 0)
		mdev->num_msgs = 0;
	mdev->num_msgs++;

	/* New message goes right after the ones already staged */
	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;

	/* Clear the whole msg region */
	memset(msghdr, 0, size);
	/* Init message header with reset values */
	msghdr->ver = OTX2_MBOX_VERSION;
	mdev->msg_size += size;
	mdev->rsp_size += size_rsp;
	/* next_msgoff is relative to the start of the TX region */
	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
exit:
	spin_unlock(&mdev->mbox_lock);

	return msghdr;
}
EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
258*4882a593Smuzhiyun 
/* Find the response corresponding to request @msg in devid's mailbox.
 *
 * Walks the TX (request) and RX (response) message lists in lockstep via
 * each message's next_msgoff, so this relies on responses being laid out
 * in the same order as their requests.  When the TX cursor reaches @msg,
 * the RX cursor points at its response.
 *
 * Return: the response header on success, or ERR_PTR(-ENODEV) if not all
 *         requests have been acked yet, @msg is not found, or the
 *         request/response IDs do not match.
 */
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
				      struct mbox_msghdr *msg)
{
	unsigned long imsg = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	/* Responses are only coherent once everything sent was acked */
	if (mdev->num_msgs != mdev->msgs_acked)
		goto error;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (msg == pmsg) {
			if (pmsg->id != prsp->id)
				goto error;
			spin_unlock(&mdev->mbox_lock);
			return prsp;
		}

		/* Advance both cursors to the next request/response pair */
		imsg = mbox->tx_start + pmsg->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}

error:
	spin_unlock(&mdev->mbox_lock);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(otx2_mbox_get_rsp);
292*4882a593Smuzhiyun 
/* Validate every response received on devid's mailbox against its request.
 *
 * Walks the TX (request) and RX (response) lists in lockstep (same layout
 * assumption as otx2_mbox_get_rsp()) and checks that each response carries
 * the same message ID as its request and a zero return code.
 *
 * Return: 0 if all responses match and succeeded, the first response's
 *         error code if one failed, or -ENODEV if responses are missing
 *         or an ID mismatch is found.
 */
int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
{
	unsigned long ireq = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	int rc = -ENODEV;
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	/* All requests must have been acked before responses are valid */
	if (mdev->num_msgs != mdev->msgs_acked)
		goto exit;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *preq = mdev->mbase + ireq;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		/* Response out of step with its request */
		if (preq->id != prsp->id) {
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}
		/* Peer reported an error for this message */
		if (prsp->rc) {
			rc = prsp->rc;
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}

		/* Advance both cursors to the next request/response pair */
		ireq = mbox->tx_start + preq->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}
	rc = 0;
exit:
	spin_unlock(&mdev->mbox_lock);
	return rc;
}
EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun int
otx2_reply_invalid_msg(struct otx2_mbox * mbox,int devid,u16 pcifunc,u16 id)333*4882a593Smuzhiyun otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
334*4882a593Smuzhiyun {
335*4882a593Smuzhiyun 	struct msg_rsp *rsp;
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun 	rsp = (struct msg_rsp *)
338*4882a593Smuzhiyun 	       otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
339*4882a593Smuzhiyun 	if (!rsp)
340*4882a593Smuzhiyun 		return -ENOMEM;
341*4882a593Smuzhiyun 	rsp->hdr.id = id;
342*4882a593Smuzhiyun 	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
343*4882a593Smuzhiyun 	rsp->hdr.rc = MBOX_MSG_INVALID;
344*4882a593Smuzhiyun 	rsp->hdr.pcifunc = pcifunc;
345*4882a593Smuzhiyun 	return 0;
346*4882a593Smuzhiyun }
347*4882a593Smuzhiyun EXPORT_SYMBOL(otx2_reply_invalid_msg);
348*4882a593Smuzhiyun 
otx2_mbox_nonempty(struct otx2_mbox * mbox,int devid)349*4882a593Smuzhiyun bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
350*4882a593Smuzhiyun {
351*4882a593Smuzhiyun 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
352*4882a593Smuzhiyun 	bool ret;
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun 	spin_lock(&mdev->mbox_lock);
355*4882a593Smuzhiyun 	ret = mdev->num_msgs != 0;
356*4882a593Smuzhiyun 	spin_unlock(&mdev->mbox_lock);
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun 	return ret;
359*4882a593Smuzhiyun }
360*4882a593Smuzhiyun EXPORT_SYMBOL(otx2_mbox_nonempty);
361*4882a593Smuzhiyun 
/* Map a mailbox message ID to its human-readable name for logging.
 *
 * The M() X-macro expands the MBOX_MESSAGES list into one
 * "case _id: return "_name";" per known message type.
 *
 * Return: the message name, or "INVALID ID" for an unknown @id.
 */
const char *otx2_mbox_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}
EXPORT_SYMBOL(otx2_mbox_id2name);
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun MODULE_AUTHOR("Marvell International Ltd.");
375*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
376