xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/cisco/enic/vnic_rq.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
3*4882a593Smuzhiyun  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * This program is free software; you may redistribute it and/or modify
6*4882a593Smuzhiyun  * it under the terms of the GNU General Public License as published by
7*4882a593Smuzhiyun  * the Free Software Foundation; version 2 of the License.
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10*4882a593Smuzhiyun  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11*4882a593Smuzhiyun  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12*4882a593Smuzhiyun  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13*4882a593Smuzhiyun  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14*4882a593Smuzhiyun  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15*4882a593Smuzhiyun  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16*4882a593Smuzhiyun  * SOFTWARE.
17*4882a593Smuzhiyun  *
18*4882a593Smuzhiyun  */
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #include <linux/kernel.h>
21*4882a593Smuzhiyun #include <linux/errno.h>
22*4882a593Smuzhiyun #include <linux/types.h>
23*4882a593Smuzhiyun #include <linux/pci.h>
24*4882a593Smuzhiyun #include <linux/delay.h>
25*4882a593Smuzhiyun #include <linux/slab.h>
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun #include "vnic_dev.h"
28*4882a593Smuzhiyun #include "vnic_rq.h"
29*4882a593Smuzhiyun #include "enic.h"
30*4882a593Smuzhiyun 
vnic_rq_alloc_bufs(struct vnic_rq * rq)31*4882a593Smuzhiyun static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
32*4882a593Smuzhiyun {
33*4882a593Smuzhiyun 	struct vnic_rq_buf *buf;
34*4882a593Smuzhiyun 	unsigned int i, j, count = rq->ring.desc_count;
35*4882a593Smuzhiyun 	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	for (i = 0; i < blks; i++) {
38*4882a593Smuzhiyun 		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);
39*4882a593Smuzhiyun 		if (!rq->bufs[i])
40*4882a593Smuzhiyun 			return -ENOMEM;
41*4882a593Smuzhiyun 	}
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun 	for (i = 0; i < blks; i++) {
44*4882a593Smuzhiyun 		buf = rq->bufs[i];
45*4882a593Smuzhiyun 		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
46*4882a593Smuzhiyun 			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
47*4882a593Smuzhiyun 			buf->desc = (u8 *)rq->ring.descs +
48*4882a593Smuzhiyun 				rq->ring.desc_size * buf->index;
49*4882a593Smuzhiyun 			if (buf->index + 1 == count) {
50*4882a593Smuzhiyun 				buf->next = rq->bufs[0];
51*4882a593Smuzhiyun 				break;
52*4882a593Smuzhiyun 			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
53*4882a593Smuzhiyun 				buf->next = rq->bufs[i + 1];
54*4882a593Smuzhiyun 			} else {
55*4882a593Smuzhiyun 				buf->next = buf + 1;
56*4882a593Smuzhiyun 				buf++;
57*4882a593Smuzhiyun 			}
58*4882a593Smuzhiyun 		}
59*4882a593Smuzhiyun 	}
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 	rq->to_use = rq->to_clean = rq->bufs[0];
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	return 0;
64*4882a593Smuzhiyun }
65*4882a593Smuzhiyun 
vnic_rq_free(struct vnic_rq * rq)66*4882a593Smuzhiyun void vnic_rq_free(struct vnic_rq *rq)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun 	struct vnic_dev *vdev;
69*4882a593Smuzhiyun 	unsigned int i;
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 	vdev = rq->vdev;
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	vnic_dev_free_desc_ring(vdev, &rq->ring);
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun 	for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
76*4882a593Smuzhiyun 		if (rq->bufs[i]) {
77*4882a593Smuzhiyun 			kfree(rq->bufs[i]);
78*4882a593Smuzhiyun 			rq->bufs[i] = NULL;
79*4882a593Smuzhiyun 		}
80*4882a593Smuzhiyun 	}
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	rq->ctrl = NULL;
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun 
vnic_rq_alloc(struct vnic_dev * vdev,struct vnic_rq * rq,unsigned int index,unsigned int desc_count,unsigned int desc_size)85*4882a593Smuzhiyun int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
86*4882a593Smuzhiyun 	unsigned int desc_count, unsigned int desc_size)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun 	int err;
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	rq->index = index;
91*4882a593Smuzhiyun 	rq->vdev = vdev;
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
94*4882a593Smuzhiyun 	if (!rq->ctrl) {
95*4882a593Smuzhiyun 		vdev_err(vdev, "Failed to hook RQ[%d] resource\n", index);
96*4882a593Smuzhiyun 		return -EINVAL;
97*4882a593Smuzhiyun 	}
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	vnic_rq_disable(rq);
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
102*4882a593Smuzhiyun 	if (err)
103*4882a593Smuzhiyun 		return err;
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun 	err = vnic_rq_alloc_bufs(rq);
106*4882a593Smuzhiyun 	if (err) {
107*4882a593Smuzhiyun 		vnic_rq_free(rq);
108*4882a593Smuzhiyun 		return err;
109*4882a593Smuzhiyun 	}
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 	return 0;
112*4882a593Smuzhiyun }
113*4882a593Smuzhiyun 
/* Program the RQ control registers and point the software ring state at
 * @fetch_index.  Used both for a fresh init (fetch/posted index 0, via
 * vnic_rq_init) and when restarting the queue from a given position.
 */
static void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;
	unsigned int count = rq->ring.desc_count;

	/* Tell the adapter where the descriptor ring lives in DMA space */
	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &rq->ctrl->ring_base);
	iowrite32(count, &rq->ctrl->ring_size);
	iowrite32(cq_index, &rq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
	/* Clear stale counters/status before (re)starting the queue */
	iowrite32(0, &rq->ctrl->dropped_packet_count);
	iowrite32(0, &rq->ctrl->error_status);
	iowrite32(fetch_index, &rq->ctrl->fetch_index);
	iowrite32(posted_index, &rq->ctrl->posted_index);

	/* Sync the software to_use/to_clean pointers to the same
	 * descriptor the hardware will fetch from next.
	 */
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
}
137*4882a593Smuzhiyun 
/* Initialize an RQ from descriptor 0: thin wrapper around
 * vnic_rq_init_start with both fetch and posted indexes zeroed.
 */
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	/* Fresh start: fetch_index = 0, posted_index = 0 */
	vnic_rq_init_start(rq, cq_index, 0, 0,
		error_interrupt_enable, error_interrupt_offset);
}
145*4882a593Smuzhiyun 
vnic_rq_error_status(struct vnic_rq * rq)146*4882a593Smuzhiyun unsigned int vnic_rq_error_status(struct vnic_rq *rq)
147*4882a593Smuzhiyun {
148*4882a593Smuzhiyun 	return ioread32(&rq->ctrl->error_status);
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun 
/* Start the receive queue by writing 1 to its enable register */
void vnic_rq_enable(struct vnic_rq *rq)
{
	iowrite32(1, &rq->ctrl->enable);
}
155*4882a593Smuzhiyun 
/* Disable the RQ and busy-wait for the hardware to quiesce.
 *
 * Returns 0 on success, or -ETIMEDOUT if the adapter never clears its
 * "running" register within the poll budget.
 */
int vnic_rq_disable(struct vnic_rq *rq)
{
	unsigned int wait;
	struct vnic_dev *vdev = rq->vdev;
	int i;

	/* Due to a race condition with clearing RQ "mini-cache" in hw, we need
	 * to disable the RQ twice to guarantee that stale descriptors are not
	 * used when this RQ is re-enabled.
	 */
	for (i = 0; i < 2; i++) {
		iowrite32(0, &rq->ctrl->enable);

		/* Wait for HW to ACK disable request */
		for (wait = 20000; wait > 0; wait--)
			if (!ioread32(&rq->ctrl->running))
				break;
		if (!wait) {
			vdev_neterr(vdev, "Failed to disable RQ[%d]\n",
				    rq->index);

			return -ETIMEDOUT;
		}
	}

	return 0;
}
183*4882a593Smuzhiyun 
vnic_rq_clean(struct vnic_rq * rq,void (* buf_clean)(struct vnic_rq * rq,struct vnic_rq_buf * buf))184*4882a593Smuzhiyun void vnic_rq_clean(struct vnic_rq *rq,
185*4882a593Smuzhiyun 	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
186*4882a593Smuzhiyun {
187*4882a593Smuzhiyun 	struct vnic_rq_buf *buf;
188*4882a593Smuzhiyun 	u32 fetch_index;
189*4882a593Smuzhiyun 	unsigned int count = rq->ring.desc_count;
190*4882a593Smuzhiyun 	int i;
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	buf = rq->to_clean;
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	for (i = 0; i < rq->ring.desc_count; i++) {
195*4882a593Smuzhiyun 		(*buf_clean)(rq, buf);
196*4882a593Smuzhiyun 		buf = buf->next;
197*4882a593Smuzhiyun 	}
198*4882a593Smuzhiyun 	rq->ring.desc_avail = rq->ring.desc_count - 1;
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	/* Use current fetch_index as the ring starting point */
201*4882a593Smuzhiyun 	fetch_index = ioread32(&rq->ctrl->fetch_index);
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun 	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
204*4882a593Smuzhiyun 		/* Hardware surprise removal: reset fetch_index */
205*4882a593Smuzhiyun 		fetch_index = 0;
206*4882a593Smuzhiyun 	}
207*4882a593Smuzhiyun 	rq->to_use = rq->to_clean =
208*4882a593Smuzhiyun 		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
209*4882a593Smuzhiyun 			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
210*4882a593Smuzhiyun 	iowrite32(fetch_index, &rq->ctrl->posted_index);
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	/* Anytime we write fetch_index, we need to re-write 0 to rq->enable
213*4882a593Smuzhiyun 	 * to re-sync internal VIC state.
214*4882a593Smuzhiyun 	 */
215*4882a593Smuzhiyun 	iowrite32(0, &rq->ctrl->enable);
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 	vnic_dev_clear_desc_ring(&rq->ring);
218*4882a593Smuzhiyun }
219*4882a593Smuzhiyun 
220