xref: /OK3568_Linux_fs/kernel/drivers/scsi/fnic/vnic_rq.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
3*4882a593Smuzhiyun  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * This program is free software; you may redistribute it and/or modify
6*4882a593Smuzhiyun  * it under the terms of the GNU General Public License as published by
7*4882a593Smuzhiyun  * the Free Software Foundation; version 2 of the License.
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10*4882a593Smuzhiyun  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11*4882a593Smuzhiyun  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12*4882a593Smuzhiyun  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13*4882a593Smuzhiyun  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14*4882a593Smuzhiyun  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15*4882a593Smuzhiyun  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16*4882a593Smuzhiyun  * SOFTWARE.
17*4882a593Smuzhiyun  */
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include <linux/errno.h>
20*4882a593Smuzhiyun #include <linux/types.h>
21*4882a593Smuzhiyun #include <linux/pci.h>
22*4882a593Smuzhiyun #include <linux/delay.h>
23*4882a593Smuzhiyun #include <linux/slab.h>
24*4882a593Smuzhiyun #include "vnic_dev.h"
25*4882a593Smuzhiyun #include "vnic_rq.h"
26*4882a593Smuzhiyun 
vnic_rq_alloc_bufs(struct vnic_rq * rq)27*4882a593Smuzhiyun static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
28*4882a593Smuzhiyun {
29*4882a593Smuzhiyun 	struct vnic_rq_buf *buf;
30*4882a593Smuzhiyun 	unsigned int i, j, count = rq->ring.desc_count;
31*4882a593Smuzhiyun 	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun 	for (i = 0; i < blks; i++) {
34*4882a593Smuzhiyun 		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
35*4882a593Smuzhiyun 		if (!rq->bufs[i]) {
36*4882a593Smuzhiyun 			printk(KERN_ERR "Failed to alloc rq_bufs\n");
37*4882a593Smuzhiyun 			return -ENOMEM;
38*4882a593Smuzhiyun 		}
39*4882a593Smuzhiyun 	}
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun 	for (i = 0; i < blks; i++) {
42*4882a593Smuzhiyun 		buf = rq->bufs[i];
43*4882a593Smuzhiyun 		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
44*4882a593Smuzhiyun 			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
45*4882a593Smuzhiyun 			buf->desc = (u8 *)rq->ring.descs +
46*4882a593Smuzhiyun 				rq->ring.desc_size * buf->index;
47*4882a593Smuzhiyun 			if (buf->index + 1 == count) {
48*4882a593Smuzhiyun 				buf->next = rq->bufs[0];
49*4882a593Smuzhiyun 				break;
50*4882a593Smuzhiyun 			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
51*4882a593Smuzhiyun 				buf->next = rq->bufs[i + 1];
52*4882a593Smuzhiyun 			} else {
53*4882a593Smuzhiyun 				buf->next = buf + 1;
54*4882a593Smuzhiyun 				buf++;
55*4882a593Smuzhiyun 			}
56*4882a593Smuzhiyun 		}
57*4882a593Smuzhiyun 	}
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	rq->to_use = rq->to_clean = rq->bufs[0];
60*4882a593Smuzhiyun 	rq->buf_index = 0;
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun 	return 0;
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun 
vnic_rq_free(struct vnic_rq * rq)65*4882a593Smuzhiyun void vnic_rq_free(struct vnic_rq *rq)
66*4882a593Smuzhiyun {
67*4882a593Smuzhiyun 	struct vnic_dev *vdev;
68*4882a593Smuzhiyun 	unsigned int i;
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	vdev = rq->vdev;
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun 	vnic_dev_free_desc_ring(vdev, &rq->ring);
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
75*4882a593Smuzhiyun 		kfree(rq->bufs[i]);
76*4882a593Smuzhiyun 		rq->bufs[i] = NULL;
77*4882a593Smuzhiyun 	}
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun 	rq->ctrl = NULL;
80*4882a593Smuzhiyun }
81*4882a593Smuzhiyun 
vnic_rq_alloc(struct vnic_dev * vdev,struct vnic_rq * rq,unsigned int index,unsigned int desc_count,unsigned int desc_size)82*4882a593Smuzhiyun int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
83*4882a593Smuzhiyun 	unsigned int desc_count, unsigned int desc_size)
84*4882a593Smuzhiyun {
85*4882a593Smuzhiyun 	int err;
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	rq->index = index;
88*4882a593Smuzhiyun 	rq->vdev = vdev;
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
91*4882a593Smuzhiyun 	if (!rq->ctrl) {
92*4882a593Smuzhiyun 		printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
93*4882a593Smuzhiyun 		return -EINVAL;
94*4882a593Smuzhiyun 	}
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 	vnic_rq_disable(rq);
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
99*4882a593Smuzhiyun 	if (err)
100*4882a593Smuzhiyun 		return err;
101*4882a593Smuzhiyun 
102*4882a593Smuzhiyun 	err = vnic_rq_alloc_bufs(rq);
103*4882a593Smuzhiyun 	if (err) {
104*4882a593Smuzhiyun 		vnic_rq_free(rq);
105*4882a593Smuzhiyun 		return err;
106*4882a593Smuzhiyun 	}
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 	return 0;
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun 
/*
 * Program the RQ control registers: descriptor ring base/size, the
 * completion queue this RQ reports to, and error-interrupt routing;
 * then clear the drop and error counters.  Software ring state is
 * resynchronized to the hardware's current fetch_index rather than
 * reset to zero, so init is safe after the hardware has already run.
 */
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;
	u32 fetch_index;

	/* Hardware expects the ring DMA address tagged with the
	 * VNIC_PADDR_TARGET marker bits.
	 */
	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &rq->ctrl->ring_base);
	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
	iowrite32(cq_index, &rq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
	iowrite32(0, &rq->ctrl->dropped_packet_count);
	iowrite32(0, &rq->ctrl->error_status);

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	/* Posting at fetch_index means the ring starts out empty */
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;
}
136*4882a593Smuzhiyun 
vnic_rq_error_status(struct vnic_rq * rq)137*4882a593Smuzhiyun unsigned int vnic_rq_error_status(struct vnic_rq *rq)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun 	return ioread32(&rq->ctrl->error_status);
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun 
vnic_rq_enable(struct vnic_rq * rq)142*4882a593Smuzhiyun void vnic_rq_enable(struct vnic_rq *rq)
143*4882a593Smuzhiyun {
144*4882a593Smuzhiyun 	iowrite32(1, &rq->ctrl->enable);
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun 
vnic_rq_disable(struct vnic_rq * rq)147*4882a593Smuzhiyun int vnic_rq_disable(struct vnic_rq *rq)
148*4882a593Smuzhiyun {
149*4882a593Smuzhiyun 	unsigned int wait;
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 	iowrite32(0, &rq->ctrl->enable);
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	/* Wait for HW to ACK disable request */
154*4882a593Smuzhiyun 	for (wait = 0; wait < 100; wait++) {
155*4882a593Smuzhiyun 		if (!(ioread32(&rq->ctrl->running)))
156*4882a593Smuzhiyun 			return 0;
157*4882a593Smuzhiyun 		udelay(1);
158*4882a593Smuzhiyun 	}
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun 	return -ETIMEDOUT;
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun 
/*
 * Reclaim every outstanding receive buffer on a quiesced RQ.
 *
 * buf_clean is invoked once per in-use buffer so the caller can
 * release whatever it attached (presumably DMA mappings/skbs —
 * defined by the fnic callers, not visible here).  Afterwards the
 * software ring pointers are resynchronized to the hardware
 * fetch_index and the descriptor ring contents are zeroed.
 *
 * The RQ must already be disabled; the WARN_ON asserts that.
 */
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
	struct vnic_rq_buf *buf;
	u32 fetch_index;

	WARN_ON(ioread32(&rq->ctrl->enable));

	buf = rq->to_clean;

	/* Walk the circular buf list until every used descriptor is
	 * returned to the available pool.
	 */
	while (vnic_rq_desc_used(rq) > 0) {

		(*buf_clean)(rq, buf);

		buf = rq->to_clean = buf->next;
		rq->ring.desc_avail++;
	}

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	/* posted == fetch leaves the hardware seeing an empty ring */
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;

	vnic_dev_clear_desc_ring(&rq->ring);
}
194*4882a593Smuzhiyun 
195