xref: /OK3568_Linux_fs/kernel/drivers/infiniband/hw/qib/qib_user_sdma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_user_sdma.h"

/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for 5secs */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
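/*
 * The drain loop in qib_user_sdma_queue_drain() sleeps 20 ms per
 * iteration, so 250 iterations gives the ~5 second budget noted above.
 */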

/*
 * Track how many times a process opens this driver.
 */
static struct rb_root qib_user_sdma_rb_root = RB_ROOT;

struct qib_user_sdma_rb_node {
	struct rb_node node;
	int refcount;
	pid_t pid;
};
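/*
 * One node above exists per PID; refcount counts how many user sdma
 * queues that process currently has open, so qib_user_sdma_queue_create()
 * can share a node and qib_user_sdma_queue_destroy() can free it on the
 * last close.
 */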

struct qib_user_sdma_pkt {
	struct list_head list;  /* list element */

	u8  tiddma;		/* if this is NEW tid-sdma */
	u8  largepkt;		/* this is large pkt from kmalloc */
	u16 frag_size;		/* frag size used by PSM */
	u16 index;              /* last header index or push index */
	u16 naddr;              /* dimension of addr (1..3) ... */
	u16 addrlimit;		/* addr array size */
	u16 tidsmidx;		/* current tidsm index */
	u16 tidsmcount;		/* tidsm array item count */
	u16 payload_size;	/* payload size so far for header */
	u32 bytes_togo;		/* bytes for processing */
	u32 counter;            /* sdma pkts queued counter for this entry */
	struct qib_tid_session_member *tidsm;	/* tid session member array */
	struct qib_user_sdma_queue *pq;	/* which pq this pkt belongs to */
	u64 added;              /* global descq number of entries */

	struct {
		u16 offset;                     /* offset for kvaddr, addr */
		u16 length;                     /* length in page */
		u16 first_desc;			/* first desc */
		u16 last_desc;			/* last desc */
		u16 put_page;                   /* should we put_page? */
		u16 dma_mapped;                 /* is page dma_mapped? */
		u16 dma_length;			/* for dma_unmap_page() */
		u16 padding;
		struct page *page;              /* may be NULL (coherent mem) */
		void *kvaddr;                   /* FIXME: only for pio hack */
		dma_addr_t addr;
	} addr[4];   /* max pages, any more and we coalesce */
};
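/*
 * Small packets use the four inline addr[] slots above; when PSM passes
 * a frag size (see qib_user_sdma_queue_pkts()), the packet is instead
 * kmalloc'ed with a larger flexible addr[] array and largepkt is set so
 * qib_user_sdma_free_pkt_list() knows how to free it.
 */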

struct qib_user_sdma_queue {
	/*
	 * pkts sent to dma engine are queued on this
	 * list head.  the type of the elements of this
	 * list are struct qib_user_sdma_pkt...
	 */
	struct list_head sent;

	/*
	 * Because the above list will be accessed by both the process and
	 * the signal handler, we need a spinlock for it.
	 */
	spinlock_t sent_lock ____cacheline_aligned_in_smp;

	/* headers with expected length are allocated from here... */
	char header_cache_name[64];
	struct dma_pool *header_cache;

	/* packets are allocated from the slab cache... */
	char pkt_slab_name[64];
	struct kmem_cache *pkt_slab;

	/* as packets go on the queue, they are counted... */
	u32 counter;
	u32 sent_counter;
	/* pending packets, not sending yet */
	u32 num_pending;
	/* sending packets, not complete yet */
	u32 num_sending;
	/* global descq number of entry of last sending packet */
	u64 added;

	/* dma page table */
	struct rb_root dma_pages_root;

	struct qib_user_sdma_rb_node *sdma_rb_node;

	/* protect everything above... */
	struct mutex lock;
};

static struct qib_user_sdma_rb_node *
qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
{
	struct qib_user_sdma_rb_node *sdma_rb_node;
	struct rb_node *node = root->rb_node;

	while (node) {
		sdma_rb_node = rb_entry(node, struct qib_user_sdma_rb_node,
					node);
		if (pid < sdma_rb_node->pid)
			node = node->rb_left;
		else if (pid > sdma_rb_node->pid)
			node = node->rb_right;
		else
			return sdma_rb_node;
	}
	return NULL;
}

static int
qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
{
	struct rb_node **node = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct qib_user_sdma_rb_node *got;

	while (*node) {
		got = rb_entry(*node, struct qib_user_sdma_rb_node, node);
		parent = *node;
		if (new->pid < got->pid)
			node = &((*node)->rb_left);
		else if (new->pid > got->pid)
			node = &((*node)->rb_right);
		else
			return 0;
	}

	rb_link_node(&new->node, parent, node);
	rb_insert_color(&new->node, root);
	return 1;
}

struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
	struct qib_user_sdma_queue *pq =
		kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
	struct qib_user_sdma_rb_node *sdma_rb_node;

	if (!pq)
		goto done;

	pq->counter = 0;
	pq->sent_counter = 0;
	pq->num_pending = 0;
	pq->num_sending = 0;
	pq->added = 0;
	pq->sdma_rb_node = NULL;

	INIT_LIST_HEAD(&pq->sent);
	spin_lock_init(&pq->sent_lock);
	mutex_init(&pq->lock);

	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct qib_user_sdma_pkt),
					 0, 0, NULL);

	if (!pq->pkt_slab)
		goto err_kfree;

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->header_cache = dma_pool_create(pq->header_cache_name,
					   dev,
					   QIB_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;

	sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root,
					current->pid);
	if (sdma_rb_node) {
		sdma_rb_node->refcount++;
	} else {
		sdma_rb_node = kmalloc(sizeof(
			struct qib_user_sdma_rb_node), GFP_KERNEL);
		if (!sdma_rb_node)
			goto err_rb;

		sdma_rb_node->refcount = 1;
		sdma_rb_node->pid = current->pid;

		qib_user_sdma_rb_insert(&qib_user_sdma_rb_root, sdma_rb_node);
	}
	pq->sdma_rb_node = sdma_rb_node;

	goto done;

err_rb:
	dma_pool_destroy(pq->header_cache);
err_slab:
	kmem_cache_destroy(pq->pkt_slab);
err_kfree:
	kfree(pq);
	pq = NULL;

done:
	return pq;
}

static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
				    int i, u16 offset, u16 len,
				    u16 first_desc, u16 last_desc,
				    u16 put_page, u16 dma_mapped,
				    struct page *page, void *kvaddr,
				    dma_addr_t dma_addr, u16 dma_length)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].first_desc = first_desc;
	pkt->addr[i].last_desc = last_desc;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
	pkt->addr[i].dma_length = dma_length;
}
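/*
 * This helper is used both for SDMA headers (first_desc = 1, page = NULL,
 * kvaddr from the dma_pool or kmalloc) and for pinned user payload pages
 * (put_page = 1, dma_mapped = 1); the flags recorded here drive the
 * cleanup in qib_user_sdma_free_pkt_frag().
 */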

static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
				size_t len, dma_addr_t *dma_addr)
{
	void *hdr;

	if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
		hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
					     dma_addr);
	else
		hdr = NULL;

	if (!hdr) {
		hdr = kmalloc(len, GFP_KERNEL);
		if (!hdr)
			return NULL;

		*dma_addr = 0;
	}

	return hdr;
}

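/*
 * Split one page of user payload into packet fragments.  The page is
 * dma-mapped, then carved up honouring the PSM frag size and, for
 * tid-sdma, the current tid entry length.  Whenever a packet boundary
 * is crossed while bytes remain, the SDMA header is duplicated and the
 * copy is patched (PBC/LRH lengths, KDETH checksum, BTH sequence
 * number) to start the next packet.
 */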
static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
				       struct qib_user_sdma_queue *pq,
				       struct qib_user_sdma_pkt *pkt,
				       struct page *page, u16 put,
				       u16 offset, u16 len, void *kvaddr)
{
	__le16 *pbc16;
	void *pbcvaddr;
	struct qib_message_header *hdr;
	u16 newlen, pbclen, lastdesc, dma_mapped;
	u32 vcto;
	union qib_seqnum seqnum;
	dma_addr_t pbcdaddr;
	dma_addr_t dma_addr =
		dma_map_page(&dd->pcidev->dev,
			page, offset, len, DMA_TO_DEVICE);
	int ret = 0;

	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
		/*
		 * dma mapping error, pkt has not managed
		 * this page yet, return the page here so
		 * the caller can ignore this page.
		 */
		if (put) {
			unpin_user_page(page);
		} else {
			/* coalesce case */
			kunmap(page);
			__free_page(page);
		}
		ret = -ENOMEM;
		goto done;
	}
	offset = 0;
	dma_mapped = 1;


next_fragment:

	/*
	 * In tid-sdma, the transfer length is restricted by
	 * the receiver side's current tid page length.
	 */
	if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
		newlen = pkt->tidsm[pkt->tidsmidx].length;
	else
		newlen = len;

	/*
	 * Then the transfer length is restricted by MTU.
	 * The last descriptor flag is set when:
	 * 1. the current packet has reached frag size length.
	 * 2. the current tid page is done, if tid-sdma.
	 * 3. there are no more bytes to go, if plain sdma.
	 */
	lastdesc = 0;
	if ((pkt->payload_size + newlen) >= pkt->frag_size) {
		newlen = pkt->frag_size - pkt->payload_size;
		lastdesc = 1;
	} else if (pkt->tiddma) {
		if (newlen == pkt->tidsm[pkt->tidsmidx].length)
			lastdesc = 1;
	} else {
		if (newlen == pkt->bytes_togo)
			lastdesc = 1;
	}

	/* fill the next fragment in this page */
	qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
		offset, newlen,		/* offset, len */
		0, lastdesc,		/* first last desc */
		put, dma_mapped,	/* put page, dma mapped */
		page, kvaddr,		/* struct page, virt addr */
		dma_addr, len);		/* dma addr, dma length */
	pkt->bytes_togo -= newlen;
	pkt->payload_size += newlen;
	pkt->naddr++;
	if (pkt->naddr == pkt->addrlimit) {
		ret = -EFAULT;
		goto done;
	}

	/* If there are no more bytes to go (lastdesc==1). */
	if (pkt->bytes_togo == 0) {
		/* The packet is done; the header is not dma mapped yet,
		 * so it must be from kmalloc. */
		if (!pkt->addr[pkt->index].addr) {
			pkt->addr[pkt->index].addr =
				dma_map_single(&dd->pcidev->dev,
					pkt->addr[pkt->index].kvaddr,
					pkt->addr[pkt->index].dma_length,
					DMA_TO_DEVICE);
			if (dma_mapping_error(&dd->pcidev->dev,
					pkt->addr[pkt->index].addr)) {
				ret = -ENOMEM;
				goto done;
			}
			pkt->addr[pkt->index].dma_mapped = 1;
		}

		goto done;
	}

	/* If tid-sdma, advance tid info. */
	if (pkt->tiddma) {
		pkt->tidsm[pkt->tidsmidx].length -= newlen;
		if (pkt->tidsm[pkt->tidsmidx].length) {
			pkt->tidsm[pkt->tidsmidx].offset += newlen;
		} else {
			pkt->tidsmidx++;
			if (pkt->tidsmidx == pkt->tidsmcount) {
				ret = -EFAULT;
				goto done;
			}
		}
	}

	/*
	 * If this is NOT the last descriptor (newlen==len),
	 * the current packet is not done yet, but the current
	 * send side page is done.
	 */
	if (lastdesc == 0)
		goto done;

	/*
	 * When running this driver under PSM with a message size that
	 * fits into one transfer unit, it is not possible to reach this
	 * point; if we do, it is a bug.
	 */

	/*
	 * Since the current packet is done and there are more
	 * bytes to go, we need to create a new sdma header, copying
	 * it from the previous sdma header, and modify both.
	 */
	pbclen = pkt->addr[pkt->index].length;
	pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
	if (!pbcvaddr) {
		ret = -ENOMEM;
		goto done;
	}
	/* Copy the previous sdma header to the new sdma header */
	pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
	memcpy(pbcvaddr, pbc16, pbclen);

	/* Modify the previous sdma header */
	hdr = (struct qib_message_header *)&pbc16[4];

	/* New pbc length */
	pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));

	/* New packet length */
	hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));

	if (pkt->tiddma) {
		/* turn on the header suppression */
		hdr->iph.pkt_flags =
			cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
		/* turn off ACK_REQ: 0x04 and EXPECTED_DONE: 0x20 */
		hdr->flags &= ~(0x04|0x20);
	} else {
		/* turn off extra bytes: 20-21 bits */
		hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
		/* turn off ACK_REQ: 0x04 */
		hdr->flags &= ~(0x04);
	}

	/* New kdeth checksum */
	vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
	hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
		be16_to_cpu(hdr->lrh[2]) -
		((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
		le16_to_cpu(hdr->iph.pkt_flags));

	/* The packet is done; the header is not dma mapped yet,
	 * so it must be from kmalloc. */
	if (!pkt->addr[pkt->index].addr) {
		pkt->addr[pkt->index].addr =
			dma_map_single(&dd->pcidev->dev,
				pkt->addr[pkt->index].kvaddr,
				pkt->addr[pkt->index].dma_length,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&dd->pcidev->dev,
				pkt->addr[pkt->index].addr)) {
			ret = -ENOMEM;
			goto done;
		}
		pkt->addr[pkt->index].dma_mapped = 1;
	}

	/* Modify the new sdma header */
	pbc16 = (__le16 *)pbcvaddr;
	hdr = (struct qib_message_header *)&pbc16[4];

	/* New pbc length */
	pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));

	/* New packet length */
	hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));

	if (pkt->tiddma) {
		/* Set new tid and offset for new sdma header */
		hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
			(le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
			(pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
			(pkt->tidsm[pkt->tidsmidx].offset>>2));
	} else {
		/* Middle protocol new packet offset */
		hdr->uwords[2] += pkt->payload_size;
	}

	/* New kdeth checksum */
	vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
	hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
		be16_to_cpu(hdr->lrh[2]) -
		((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
		le16_to_cpu(hdr->iph.pkt_flags));

	/* Next sequence number in new sdma header */
	seqnum.val = be32_to_cpu(hdr->bth[2]);
	if (pkt->tiddma)
		seqnum.seq++;
	else
		seqnum.pkt++;
	hdr->bth[2] = cpu_to_be32(seqnum.val);

	/* Init new sdma header. */
	qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
		0, pbclen,		/* offset, len */
		1, 0,			/* first last desc */
		0, 0,			/* put page, dma mapped */
		NULL, pbcvaddr,		/* struct page, virt addr */
		pbcdaddr, pbclen);	/* dma addr, dma length */
	pkt->index = pkt->naddr;
	pkt->payload_size = 0;
	pkt->naddr++;
	if (pkt->naddr == pkt->addrlimit) {
		ret = -EFAULT;
		goto done;
	}

	/* Prepare for next fragment in this page */
	if (newlen != len) {
		if (dma_mapped) {
			put = 0;
			dma_mapped = 0;
			page = NULL;
			kvaddr = NULL;
		}
		len -= newlen;
		offset += newlen;

		goto next_fragment;
	}

done:
	return ret;
}

/* we've too many pages in the iovec, coalesce to a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
				  struct qib_user_sdma_queue *pq,
				  struct qib_user_sdma_pkt *pkt,
				  const struct iovec *iov,
				  unsigned long niov)
{
	int ret = 0;
	struct page *page = alloc_page(GFP_KERNEL);
	void *mpage_save;
	char *mpage;
	int i;
	int len = 0;

	if (!page) {
		ret = -ENOMEM;
		goto done;
	}

	mpage = kmap(page);
	mpage_save = mpage;
	for (i = 0; i < niov; i++) {
		int cfur;

		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		if (cfur) {
			ret = -EFAULT;
			goto free_unmap;
		}

		mpage += iov[i].iov_len;
		len += iov[i].iov_len;
	}

	ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
			page, 0, 0, len, mpage_save);
	goto done;

free_unmap:
	kunmap(page);
	__free_page(page);
done:
	return ret;
}
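/*
 * Note the coalesced page is passed to qib_user_sdma_page_to_frags()
 * with put = 0: it was freshly allocated and kmap'ed here rather than
 * pinned from user space, so on error it is kunmap'ed and freed instead
 * of unpinned.
 */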

/*
 * How many pages in this iovec element?
 */
static size_t qib_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long addr  = (unsigned long) iov->iov_base;
	const unsigned long  len  = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}
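/*
 * Worked example (assuming 4 KiB pages): iov_base = 0x1ffc, iov_len = 16
 * straddles a page boundary, so spage = 0x1000, epage = 0x2000 and the
 * result is 1 + (0x1000 >> 12) = 2 pages.
 */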

static void qib_user_sdma_free_pkt_frag(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct qib_user_sdma_pkt *pkt,
					int frag)
{
	const int i = frag;

	if (pkt->addr[i].page) {
		/* only user data has page */
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].dma_length,
				       DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		if (pkt->addr[i].put_page)
			unpin_user_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr) {
		/* for headers */
		if (pkt->addr[i].dma_mapped) {
			/* from kmalloc & dma mapped */
			dma_unmap_single(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].dma_length,
				       DMA_TO_DEVICE);
			kfree(pkt->addr[i].kvaddr);
		} else if (pkt->addr[i].addr) {
			/* free coherent mem from cache... */
			dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
		} else {
			/* from kmalloc but not dma mapped */
			kfree(pkt->addr[i].kvaddr);
		}
	}
}

/* return number of pages pinned... */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
				   struct qib_user_sdma_queue *pq,
				   struct qib_user_sdma_pkt *pkt,
				   unsigned long addr, int tlen, size_t npages)
{
	struct page *pages[8];
	int i, j;
	int ret = 0;

	while (npages) {
		if (npages > 8)
			j = 8;
		else
			j = npages;

		ret = pin_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
		if (ret != j) {
			i = 0;
			j = ret;
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < j; i++) {
			/* map the pages... */
			unsigned long fofs = addr & ~PAGE_MASK;
			int flen = ((fofs + tlen) > PAGE_SIZE) ?
				(PAGE_SIZE - fofs) : tlen;

			ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
				pages[i], 1, fofs, flen, NULL);
			if (ret < 0) {
				/* the current page has been taken
				 * care of inside the above call.
				 */
				i++;
				goto free_pages;
			}

			addr += flen;
			tlen -= flen;
		}

		npages -= j;
	}

	goto done;

	/* if error, return all pages not managed by pkt */
free_pages:
	while (i < j)
		unpin_user_page(pages[i++]);

done:
	return ret;
}

static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
				 struct qib_user_sdma_queue *pq,
				 struct qib_user_sdma_pkt *pkt,
				 const struct iovec *iov,
				 unsigned long niov)
{
	int ret = 0;
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const size_t npages = qib_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
					      iov[idx].iov_len, npages);
		if (ret < 0)
			goto free_pkt;
	}

	goto done;

free_pkt:
	/* we need to ignore the first entry here */
	for (idx = 1; idx < pkt->naddr; idx++)
		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

	/* We need to dma unmap the first entry to restore the original
	 * state, so that the caller can free the memory on the error
	 * path; the caller does not know whether it was dma mapped. */
	if (pkt->addr[0].dma_mapped) {
		dma_unmap_single(&dd->pcidev->dev,
		       pkt->addr[0].addr,
		       pkt->addr[0].dma_length,
		       DMA_TO_DEVICE);
		pkt->addr[0].addr = 0;
		pkt->addr[0].dma_mapped = 0;
	}

done:
	return ret;
}

static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
				      struct qib_user_sdma_queue *pq,
				      struct qib_user_sdma_pkt *pkt,
				      const struct iovec *iov,
				      unsigned long niov, int npages)
{
	int ret = 0;

	if (pkt->frag_size == pkt->bytes_togo &&
			npages >= ARRAY_SIZE(pkt->addr))
		ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
	else
		ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

	return ret;
}
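/*
 * Coalescing is only worthwhile when the whole payload fits in a single
 * fragment (frag_size == bytes_togo) yet spans more pages than the four
 * inline addr[] slots; otherwise the user pages are pinned and mapped
 * in place.
 */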

/* free a packet list */
static void qib_user_sdma_free_pkt_list(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct list_head *list)
{
	struct qib_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		if (pkt->largepkt)
			kfree(pkt);
		else
			kmem_cache_free(pq->pkt_slab, pkt);
	}
	INIT_LIST_HEAD(list);
}

/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets on the list, returning the number of
 * consumed iovec entries.  list must be empty initially,
 * as, if there is an error we clean it...
 */
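/*
 * A minimal call sketch (hypothetical values, for illustration only):
 *
 *	struct list_head list;
 *	int mxp = 8, ndesc = 0;
 *
 *	INIT_LIST_HEAD(&list);
 *	ret = qib_user_sdma_queue_pkts(dd, ppd, pq, iov, niov,
 *				       &list, &mxp, &ndesc);
 *	// ret < 0 on error; otherwise ret = iovec entries consumed,
 *	// mxp = packets built, ndesc = descriptors they will need.
 */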
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
				    struct qib_pportdata *ppd,
				    struct qib_user_sdma_queue *pq,
				    const struct iovec *iov,
				    unsigned long niov,
				    struct list_head *list,
				    int *maxpkts, int *ndesc)
{
	unsigned long idx = 0;
	int ret = 0;
	int npkts = 0;
	__le32 *pbc;
	dma_addr_t dma_addr;
	struct qib_user_sdma_pkt *pkt = NULL;
	size_t len;
	size_t nw;
	u32 counter = pq->counter;
	u16 frag_size;

	while (idx < niov && npkts < *maxpkts) {
		const unsigned long addr = (unsigned long) iov[idx].iov_base;
		const unsigned long idx_save = idx;
		unsigned pktnw;
		unsigned pktnwc;
		int nfrags = 0;
		size_t npages = 0;
		size_t bytes_togo = 0;
		int tiddma = 0;
		int cfur;

		len = iov[idx].iov_len;
		nw = len >> 2;

		if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
		    len > PAGE_SIZE || len & 3 || addr & 3) {
			ret = -EINVAL;
			goto free_list;
		}

		pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
		if (!pbc) {
			ret = -ENOMEM;
			goto free_list;
		}

		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
		if (cfur) {
			ret = -EFAULT;
			goto free_pbc;
		}

		/*
		 * This assignment is a bit strange.  It's because the
		 * pbc counts the number of 32 bit words in the full
		 * packet _except_ the first word of the pbc itself...
		 */
		pktnwc = nw - 1;

		/*
		 * pktnw computation yields the number of 32 bit words
		 * that the caller has indicated in the PBC.  note that
		 * this is one less than the total number of words that
		 * goes to the send DMA engine as the first 32 bit word
		 * of the PBC itself is not counted.  Armed with this count,
		 * we can verify that the packet is consistent with the
		 * iovec lengths.
		 */
		pktnw = le32_to_cpu(*pbc) & 0xFFFF;
		if (pktnw < pktnwc) {
			ret = -EINVAL;
			goto free_pbc;
		}

		idx++;
		while (pktnwc < pktnw && idx < niov) {
			const size_t slen = iov[idx].iov_len;
			const unsigned long faddr =
				(unsigned long) iov[idx].iov_base;

			if (slen & 3 || faddr & 3 || !slen) {
				ret = -EINVAL;
				goto free_pbc;
			}

			npages += qib_user_sdma_num_pages(&iov[idx]);

			if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
			    bytes_togo > type_max(typeof(pkt->bytes_togo))) {
				ret = -EINVAL;
				goto free_pbc;
			}
			pktnwc += slen >> 2;
			idx++;
			nfrags++;
		}

		if (pktnwc != pktnw) {
			ret = -EINVAL;
			goto free_pbc;
		}

		frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
		if (((frag_size ? frag_size : bytes_togo) + len) >
						ppd->ibmaxlen) {
			ret = -EINVAL;
			goto free_pbc;
		}

		if (frag_size) {
			size_t tidsmsize, n, pktsize, sz, addrlimit;

			n = npages*((2*PAGE_SIZE/frag_size)+1);
			pktsize = struct_size(pkt, addr, n);

			/*
			 * Determine if this is tid-sdma or just sdma.
			 */
			tiddma = (((le32_to_cpu(pbc[7])>>
				QLOGIC_IB_I_TID_SHIFT)&
				QLOGIC_IB_I_TID_MASK) !=
				QLOGIC_IB_I_TID_MASK);

			if (tiddma)
				tidsmsize = iov[idx].iov_len;
			else
				tidsmsize = 0;

			if (check_add_overflow(pktsize, tidsmsize, &sz)) {
				ret = -EINVAL;
				goto free_pbc;
			}
			pkt = kmalloc(sz, GFP_KERNEL);
			if (!pkt) {
				ret = -ENOMEM;
				goto free_pbc;
			}
			pkt->largepkt = 1;
			pkt->frag_size = frag_size;
			if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
					       &addrlimit) ||
			    addrlimit > type_max(typeof(pkt->addrlimit))) {
				ret = -EINVAL;
				goto free_pkt;
			}
			pkt->addrlimit = addrlimit;

			if (tiddma) {
				char *tidsm = (char *)pkt + pktsize;

				cfur = copy_from_user(tidsm,
					iov[idx].iov_base, tidsmsize);
				if (cfur) {
					ret = -EFAULT;
					goto free_pkt;
				}
				pkt->tidsm =
					(struct qib_tid_session_member *)tidsm;
				pkt->tidsmcount = tidsmsize/
					sizeof(struct qib_tid_session_member);
				pkt->tidsmidx = 0;
				idx++;
			}

			/*
			 * The pbc 'fill1' field is borrowed to pass the frag
			 * size; we need to clear it after picking up the frag
			 * size, since the hardware requires this field to be
			 * zero.
			 */
			*pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
		} else {
			pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
			if (!pkt) {
				ret = -ENOMEM;
				goto free_pbc;
			}
			pkt->largepkt = 0;
			pkt->frag_size = bytes_togo;
			pkt->addrlimit = ARRAY_SIZE(pkt->addr);
		}
		pkt->bytes_togo = bytes_togo;
		pkt->payload_size = 0;
		pkt->counter = counter;
		pkt->tiddma = tiddma;

		/* setup the first header */
		qib_user_sdma_init_frag(pkt, 0, /* index */
			0, len,		/* offset, len */
			1, 0,		/* first last desc */
			0, 0,		/* put page, dma mapped */
			NULL, pbc,	/* struct page, virt addr */
			dma_addr, len);	/* dma addr, dma length */
		pkt->index = 0;
		pkt->naddr = 1;

		if (nfrags) {
			ret = qib_user_sdma_init_payload(dd, pq, pkt,
							 iov + idx_save + 1,
							 nfrags, npages);
			if (ret < 0)
				goto free_pkt;
		} else {
			/* since there is no payload, mark the
			 * header as the last desc. */
			pkt->addr[0].last_desc = 1;

			if (dma_addr == 0) {
				/*
				 * the header is not dma mapped yet.
				 * it should be from kmalloc.
				 */
				dma_addr = dma_map_single(&dd->pcidev->dev,
					pbc, len, DMA_TO_DEVICE);
				if (dma_mapping_error(&dd->pcidev->dev,
								dma_addr)) {
					ret = -ENOMEM;
					goto free_pkt;
				}
				pkt->addr[0].addr = dma_addr;
				pkt->addr[0].dma_mapped = 1;
			}
		}

		counter++;
		npkts++;
		pkt->pq = pq;
		pkt->index = 0; /* reset index for push on hw */
		*ndesc += pkt->naddr;

		list_add_tail(&pkt->list, list);
	}

	*maxpkts = npkts;
	ret = idx;
	goto done;

free_pkt:
	if (pkt->largepkt)
		kfree(pkt);
	else
		kmem_cache_free(pq->pkt_slab, pkt);
free_pbc:
	if (dma_addr)
		dma_pool_free(pq->header_cache, pbc, dma_addr);
	else
		kfree(pbc);
free_list:
	qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
	return ret;
}

static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
					       u32 c)
{
	pq->sent_counter = c;
}

/* try to clean out queue -- needs pq->lock */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
				     struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	struct list_head free_list;
	struct qib_user_sdma_pkt *pkt;
	struct qib_user_sdma_pkt *pkt_prev;
	unsigned long flags;
	int ret = 0;

	if (!pq->num_sending)
		return 0;

	INIT_LIST_HEAD(&free_list);

	/*
	 * We need this spinlock here because the interrupt handler
	 * might modify this list in qib_user_sdma_send_desc(); we also
	 * cannot be interrupted, otherwise it is a deadlock.
	 */
	spin_lock_irqsave(&pq->sent_lock, flags);
	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		s64 descd = ppd->sdma_descq_removed - pkt->added;

		if (descd < 0)
			break;

		list_move_tail(&pkt->list, &free_list);

		/* one more packet cleaned */
		ret++;
		pq->num_sending--;
	}
	spin_unlock_irqrestore(&pq->sent_lock, flags);

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_entry(free_list.prev,
				 struct qib_user_sdma_pkt, list);
		counter = pkt->counter;

		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		qib_user_sdma_set_complete_counter(pq, counter);
	}

	return ret;
}

void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
	if (!pq)
		return;

	pq->sdma_rb_node->refcount--;
	if (pq->sdma_rb_node->refcount == 0) {
		rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
		kfree(pq->sdma_rb_node);
	}
	dma_pool_destroy(pq->header_cache);
	kmem_cache_destroy(pq->pkt_slab);
	kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = qib_sdma_make_progress(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun /* we're in close, drain packets so that we can cleanup successfully... */
qib_user_sdma_queue_drain(struct qib_pportdata * ppd,struct qib_user_sdma_queue * pq)1138*4882a593Smuzhiyun void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
1139*4882a593Smuzhiyun 			       struct qib_user_sdma_queue *pq)
1140*4882a593Smuzhiyun {
1141*4882a593Smuzhiyun 	struct qib_devdata *dd = ppd->dd;
1142*4882a593Smuzhiyun 	unsigned long flags;
1143*4882a593Smuzhiyun 	int i;
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	if (!pq)
1146*4882a593Smuzhiyun 		return;
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
1149*4882a593Smuzhiyun 		mutex_lock(&pq->lock);
1150*4882a593Smuzhiyun 		if (!pq->num_pending && !pq->num_sending) {
1151*4882a593Smuzhiyun 			mutex_unlock(&pq->lock);
1152*4882a593Smuzhiyun 			break;
1153*4882a593Smuzhiyun 		}
1154*4882a593Smuzhiyun 		qib_user_sdma_hwqueue_clean(ppd);
1155*4882a593Smuzhiyun 		qib_user_sdma_queue_clean(ppd, pq);
1156*4882a593Smuzhiyun 		mutex_unlock(&pq->lock);
1157*4882a593Smuzhiyun 		msleep(20);
1158*4882a593Smuzhiyun 	}
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 	if (pq->num_pending || pq->num_sending) {
1161*4882a593Smuzhiyun 		struct qib_user_sdma_pkt *pkt;
1162*4882a593Smuzhiyun 		struct qib_user_sdma_pkt *pkt_prev;
1163*4882a593Smuzhiyun 		struct list_head free_list;
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 		mutex_lock(&pq->lock);
1166*4882a593Smuzhiyun 		spin_lock_irqsave(&ppd->sdma_lock, flags);
1167*4882a593Smuzhiyun 		/*
1168*4882a593Smuzhiyun 		 * Since we hold sdma_lock, it is safe to update these
1169*4882a593Smuzhiyun 		 * lists and counters without taking sent_lock.
1170*4882a593Smuzhiyun 		 */
1170*4882a593Smuzhiyun 		if (pq->num_pending) {
1171*4882a593Smuzhiyun 			list_for_each_entry_safe(pkt, pkt_prev,
1172*4882a593Smuzhiyun 					&ppd->sdma_userpending, list) {
1173*4882a593Smuzhiyun 				if (pkt->pq == pq) {
1174*4882a593Smuzhiyun 					list_move_tail(&pkt->list, &pq->sent);
1175*4882a593Smuzhiyun 					pq->num_pending--;
1176*4882a593Smuzhiyun 					pq->num_sending++;
1177*4882a593Smuzhiyun 				}
1178*4882a593Smuzhiyun 			}
1179*4882a593Smuzhiyun 		}
1180*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 		qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
1183*4882a593Smuzhiyun 		INIT_LIST_HEAD(&free_list);
1184*4882a593Smuzhiyun 		list_splice_init(&pq->sent, &free_list);
1185*4882a593Smuzhiyun 		pq->num_sending = 0;
1186*4882a593Smuzhiyun 		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
1187*4882a593Smuzhiyun 		mutex_unlock(&pq->lock);
1188*4882a593Smuzhiyun 	}
1189*4882a593Smuzhiyun }
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun static inline __le64 qib_sdma_make_desc0(u8 gen,
1192*4882a593Smuzhiyun 					 u64 addr, u64 dwlen, u64 dwoffset)
1193*4882a593Smuzhiyun {
1194*4882a593Smuzhiyun 	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
1195*4882a593Smuzhiyun 			   ((addr & 0xfffffffcULL) << 32) |
1196*4882a593Smuzhiyun 			   /* SDmaGeneration[1:0] */
1197*4882a593Smuzhiyun 			   ((gen & 3ULL) << 30) |
1198*4882a593Smuzhiyun 			   /* SDmaDwordCount[10:0] */
1199*4882a593Smuzhiyun 			   ((dwlen & 0x7ffULL) << 16) |
1200*4882a593Smuzhiyun 			   /* SDmaBufOffset[12:2] */
1201*4882a593Smuzhiyun 			   (dwoffset & 0x7ffULL));
1202*4882a593Smuzhiyun }
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
1205*4882a593Smuzhiyun {
1206*4882a593Smuzhiyun 	return descq | cpu_to_le64(1ULL << 12);
1207*4882a593Smuzhiyun }
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
1210*4882a593Smuzhiyun {
1211*4882a593Smuzhiyun 					      /* last */  /* dma head */
1212*4882a593Smuzhiyun 	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
1213*4882a593Smuzhiyun }
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun static inline __le64 qib_sdma_make_desc1(u64 addr)
1216*4882a593Smuzhiyun {
1217*4882a593Smuzhiyun 	/* SDmaPhyAddr[47:32] */
1218*4882a593Smuzhiyun 	return cpu_to_le64(addr >> 32);
1219*4882a593Smuzhiyun }
1220*4882a593Smuzhiyun 
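/*
 * Descriptor layout produced by the helpers above.  Bit positions are
 * taken from the code; the names for bits 14 and 15 are inferred from
 * their use in qib_user_sdma_send_frag() and qib_user_sdma_send_desc()
 * below:
 *
 *   qw[0]  [63:32] SDmaPhyAddr[31:0] (dword aligned)
 *          [31:30] SDmaGeneration[1:0]
 *          [26:16] SDmaDwordCount[10:0]
 *          [15]    interrupt request
 *          [14]    large-buffer packet
 *          [13]    dma head update
 *          [12]    first descriptor of a packet
 *          [11]    last descriptor of a packet
 *          [10:0]  SDmaBufOffset[12:2]
 *   qw[1]  [15:0]  SDmaPhyAddr[47:32]
 *
 * Worked example with hypothetical values: gen = 2, addr = 0x123456780,
 * dwlen = 0x40, dwoffset = 0 encodes to qw[0] = 0x2345678080400000 (before
 * any flag bits are set) and qw[1] = 0x1.
 */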
1221*4882a593Smuzhiyun static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
1222*4882a593Smuzhiyun 				    struct qib_user_sdma_pkt *pkt, int idx,
1223*4882a593Smuzhiyun 				    unsigned ofs, u16 tail, u8 gen)
1224*4882a593Smuzhiyun {
1225*4882a593Smuzhiyun 	const u64 addr = (u64) pkt->addr[idx].addr +
1226*4882a593Smuzhiyun 		(u64) pkt->addr[idx].offset;
1227*4882a593Smuzhiyun 	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
1228*4882a593Smuzhiyun 	__le64 *descqp;
1229*4882a593Smuzhiyun 	__le64 descq0;
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 	descqp = &ppd->sdma_descq[tail].qw[0];
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 	descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
1234*4882a593Smuzhiyun 	if (pkt->addr[idx].first_desc)
1235*4882a593Smuzhiyun 		descq0 = qib_sdma_make_first_desc0(descq0);
1236*4882a593Smuzhiyun 	if (pkt->addr[idx].last_desc) {
1237*4882a593Smuzhiyun 		descq0 = qib_sdma_make_last_desc0(descq0);
1238*4882a593Smuzhiyun 		if (ppd->sdma_intrequest) {
1239*4882a593Smuzhiyun 			descq0 |= cpu_to_le64(1ULL << 15);
1240*4882a593Smuzhiyun 			ppd->sdma_intrequest = 0;
1241*4882a593Smuzhiyun 		}
1242*4882a593Smuzhiyun 	}
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 	descqp[0] = descq0;
1245*4882a593Smuzhiyun 	descqp[1] = qib_sdma_make_desc1(addr);
1246*4882a593Smuzhiyun }
1247*4882a593Smuzhiyun 
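/*
 * Overview of qib_user_sdma_send_desc(), descriptive of the code below:
 * walk pktlist and write each fragment into the descriptor ring, bumping
 * the generation on wrap-around.  An interrupt is requested when the ring
 * wraps and again at the ring midpoint, presumably so completions are
 * reaped at least twice per trip around the ring.  Packets larger than the
 * 2KB PIO buffer get every descriptor tagged with the large-buffer bit.
 * The chip tail register is advanced only once per batch, after the loop.
 */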
1248*4882a593Smuzhiyun void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
1249*4882a593Smuzhiyun 				struct list_head *pktlist)
1250*4882a593Smuzhiyun {
1251*4882a593Smuzhiyun 	struct qib_devdata *dd = ppd->dd;
1252*4882a593Smuzhiyun 	u16 nfree, nsent;
1253*4882a593Smuzhiyun 	u16 tail, tail_c;
1254*4882a593Smuzhiyun 	u8 gen, gen_c;
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	nfree = qib_sdma_descq_freecnt(ppd);
1257*4882a593Smuzhiyun 	if (!nfree)
1258*4882a593Smuzhiyun 		return;
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun retry:
1261*4882a593Smuzhiyun 	nsent = 0;
1262*4882a593Smuzhiyun 	tail_c = tail = ppd->sdma_descq_tail;
1263*4882a593Smuzhiyun 	gen_c = gen = ppd->sdma_generation;
1264*4882a593Smuzhiyun 	while (!list_empty(pktlist)) {
1265*4882a593Smuzhiyun 		struct qib_user_sdma_pkt *pkt =
1266*4882a593Smuzhiyun 			list_entry(pktlist->next, struct qib_user_sdma_pkt,
1267*4882a593Smuzhiyun 				   list);
1268*4882a593Smuzhiyun 		int i, j, c = 0;
1269*4882a593Smuzhiyun 		unsigned ofs = 0;
1270*4882a593Smuzhiyun 		u16 dtail = tail;
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 		for (i = pkt->index; i < pkt->naddr && nfree; i++) {
1273*4882a593Smuzhiyun 			qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
1274*4882a593Smuzhiyun 			ofs += pkt->addr[i].length >> 2;
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 			if (++tail == ppd->sdma_descq_cnt) {
1277*4882a593Smuzhiyun 				tail = 0;
1278*4882a593Smuzhiyun 				++gen;
1279*4882a593Smuzhiyun 				ppd->sdma_intrequest = 1;
1280*4882a593Smuzhiyun 			} else if (tail == (ppd->sdma_descq_cnt>>1)) {
1281*4882a593Smuzhiyun 				ppd->sdma_intrequest = 1;
1282*4882a593Smuzhiyun 			}
1283*4882a593Smuzhiyun 			nfree--;
1284*4882a593Smuzhiyun 			if (pkt->addr[i].last_desc == 0)
1285*4882a593Smuzhiyun 				continue;
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 			/*
1288*4882a593Smuzhiyun 			 * If the packet is >= 2KB mtu equivalent, we
1289*4882a593Smuzhiyun 			 * have to use the large buffers, and have to
1290*4882a593Smuzhiyun 			 * mark each descriptor as part of a large
1291*4882a593Smuzhiyun 			 * buffer packet.
1292*4882a593Smuzhiyun 			 */
1293*4882a593Smuzhiyun 			if (ofs > dd->piosize2kmax_dwords) {
1294*4882a593Smuzhiyun 				for (j = pkt->index; j <= i; j++) {
1295*4882a593Smuzhiyun 					ppd->sdma_descq[dtail].qw[0] |=
1296*4882a593Smuzhiyun 						cpu_to_le64(1ULL << 14);
1297*4882a593Smuzhiyun 					if (++dtail == ppd->sdma_descq_cnt)
1298*4882a593Smuzhiyun 						dtail = 0;
1299*4882a593Smuzhiyun 				}
1300*4882a593Smuzhiyun 			}
1301*4882a593Smuzhiyun 			c += i + 1 - pkt->index;
1302*4882a593Smuzhiyun 			pkt->index = i + 1; /* index for next first */
1303*4882a593Smuzhiyun 			tail_c = dtail = tail;
1304*4882a593Smuzhiyun 			gen_c = gen;
1305*4882a593Smuzhiyun 			ofs = 0;  /* reset for next packet */
1306*4882a593Smuzhiyun 		}
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 		ppd->sdma_descq_added += c;
1309*4882a593Smuzhiyun 		nsent += c;
1310*4882a593Smuzhiyun 		if (pkt->index == pkt->naddr) {
1311*4882a593Smuzhiyun 			pkt->added = ppd->sdma_descq_added;
1312*4882a593Smuzhiyun 			pkt->pq->added = pkt->added;
1313*4882a593Smuzhiyun 			pkt->pq->num_pending--;
1314*4882a593Smuzhiyun 			spin_lock(&pkt->pq->sent_lock);
1315*4882a593Smuzhiyun 			pkt->pq->num_sending++;
1316*4882a593Smuzhiyun 			list_move_tail(&pkt->list, &pkt->pq->sent);
1317*4882a593Smuzhiyun 			spin_unlock(&pkt->pq->sent_lock);
1318*4882a593Smuzhiyun 		}
1319*4882a593Smuzhiyun 		if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
1320*4882a593Smuzhiyun 			break;
1321*4882a593Smuzhiyun 	}
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	/* advance the tail on the chip if necessary */
1324*4882a593Smuzhiyun 	if (ppd->sdma_descq_tail != tail_c) {
1325*4882a593Smuzhiyun 		ppd->sdma_generation = gen_c;
1326*4882a593Smuzhiyun 		dd->f_sdma_update_tail(ppd, tail_c);
1327*4882a593Smuzhiyun 	}
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	if (nfree && !list_empty(pktlist))
1330*4882a593Smuzhiyun 		goto retry;
1331*4882a593Smuzhiyun }
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun /* pq->lock must be held, get packets on the wire... */
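/*
 * Two submission paths, descriptive of the code below: when the sdma rb
 * node refcount is greater than 1, other processes share this port, so the
 * packets are appended to ppd->sdma_userpending and drained later via the
 * port's pending list (non-blocking).  Otherwise this process is the only
 * user and the function loops, re-taking sdma_lock each pass, until every
 * packet has been written to the descriptor ring (blocking).
 */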
1334*4882a593Smuzhiyun static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
1335*4882a593Smuzhiyun 				 struct qib_user_sdma_queue *pq,
1336*4882a593Smuzhiyun 				 struct list_head *pktlist, int count)
1337*4882a593Smuzhiyun {
1338*4882a593Smuzhiyun 	unsigned long flags;
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
1341*4882a593Smuzhiyun 		return -ECOMM;
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	/* non-blocking mode */
1344*4882a593Smuzhiyun 	if (pq->sdma_rb_node->refcount > 1) {
1345*4882a593Smuzhiyun 		spin_lock_irqsave(&ppd->sdma_lock, flags);
1346*4882a593Smuzhiyun 		if (unlikely(!__qib_sdma_running(ppd))) {
1347*4882a593Smuzhiyun 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1348*4882a593Smuzhiyun 			return -ECOMM;
1349*4882a593Smuzhiyun 		}
1350*4882a593Smuzhiyun 		pq->num_pending += count;
1351*4882a593Smuzhiyun 		list_splice_tail_init(pktlist, &ppd->sdma_userpending);
1352*4882a593Smuzhiyun 		qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
1353*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1354*4882a593Smuzhiyun 		return 0;
1355*4882a593Smuzhiyun 	}
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	/* In this case, descriptors from this process are not
1358*4882a593Smuzhiyun 	 * linked to the ppd pending queue and the interrupt handler
1359*4882a593Smuzhiyun 	 * will not touch this queue, so it is OK to modify it
1360*4882a593Smuzhiyun 	 * directly without holding the sdma lock.
1361*4882a593Smuzhiyun 	 */
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	pq->num_pending += count;
1365*4882a593Smuzhiyun 	/*
1366*4882a593Smuzhiyun 	 * Blocking mode for a single-rail process: we must
1367*4882a593Smuzhiyun 	 * release and reacquire sdma_lock to give other processes
1368*4882a593Smuzhiyun 	 * a chance to make progress. This is important for
1369*4882a593Smuzhiyun 	 * performance.
1370*4882a593Smuzhiyun 	 */
1371*4882a593Smuzhiyun 	do {
1372*4882a593Smuzhiyun 		spin_lock_irqsave(&ppd->sdma_lock, flags);
1373*4882a593Smuzhiyun 		if (unlikely(!__qib_sdma_running(ppd))) {
1374*4882a593Smuzhiyun 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1375*4882a593Smuzhiyun 			return -ECOMM;
1376*4882a593Smuzhiyun 		}
1377*4882a593Smuzhiyun 		qib_user_sdma_send_desc(ppd, pktlist);
1378*4882a593Smuzhiyun 		if (!list_empty(pktlist))
1379*4882a593Smuzhiyun 			qib_sdma_make_progress(ppd);
1380*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1381*4882a593Smuzhiyun 	} while (!list_empty(pktlist));
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 	return 0;
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun 
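/*
 * Entry point for user SDMA writes, descriptive of the code below:
 * opportunistically reap prior completions, then repeatedly convert iovecs
 * into packets and push them onto the hardware queue, lazily cleaning the
 * ring whenever free descriptors run low.  Returns the number of packets
 * queued, or a negative errno.
 */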
1386*4882a593Smuzhiyun int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
1387*4882a593Smuzhiyun 			 struct qib_user_sdma_queue *pq,
1388*4882a593Smuzhiyun 			 const struct iovec *iov,
1389*4882a593Smuzhiyun 			 unsigned long dim)
1390*4882a593Smuzhiyun {
1391*4882a593Smuzhiyun 	struct qib_devdata *dd = rcd->dd;
1392*4882a593Smuzhiyun 	struct qib_pportdata *ppd = rcd->ppd;
1393*4882a593Smuzhiyun 	int ret = 0;
1394*4882a593Smuzhiyun 	struct list_head list;
1395*4882a593Smuzhiyun 	int npkts = 0;
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 	INIT_LIST_HEAD(&list);
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 	mutex_lock(&pq->lock);
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	/* why not -ECOMM like qib_user_sdma_push_pkts() below? */
1402*4882a593Smuzhiyun 	if (!qib_sdma_running(ppd))
1403*4882a593Smuzhiyun 		goto done_unlock;
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 	/* if there are packets not yet complete */
1406*4882a593Smuzhiyun 	if (pq->added > ppd->sdma_descq_removed)
1407*4882a593Smuzhiyun 		qib_user_sdma_hwqueue_clean(ppd);
1408*4882a593Smuzhiyun 	/* if there are completed packets to be freed */
1409*4882a593Smuzhiyun 	if (pq->num_sending)
1410*4882a593Smuzhiyun 		qib_user_sdma_queue_clean(ppd, pq);
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	while (dim) {
1413*4882a593Smuzhiyun 		int mxp = 1;
1414*4882a593Smuzhiyun 		int ndesc = 0;
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 		ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
1417*4882a593Smuzhiyun 				iov, dim, &list, &mxp, &ndesc);
1418*4882a593Smuzhiyun 		if (ret < 0)
1419*4882a593Smuzhiyun 			goto done_unlock;
1420*4882a593Smuzhiyun 		else {
1421*4882a593Smuzhiyun 			dim -= ret;
1422*4882a593Smuzhiyun 			iov += ret;
1423*4882a593Smuzhiyun 		}
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 		/* force packets onto the sdma hw queue... */
1426*4882a593Smuzhiyun 		if (!list_empty(&list)) {
1427*4882a593Smuzhiyun 			/*
1428*4882a593Smuzhiyun 			 * Lazily clean hw queue.
1429*4882a593Smuzhiyun 			 */
1430*4882a593Smuzhiyun 			if (qib_sdma_descq_freecnt(ppd) < ndesc) {
1431*4882a593Smuzhiyun 				qib_user_sdma_hwqueue_clean(ppd);
1432*4882a593Smuzhiyun 				if (pq->num_sending)
1433*4882a593Smuzhiyun 					qib_user_sdma_queue_clean(ppd, pq);
1434*4882a593Smuzhiyun 			}
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 			ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
1437*4882a593Smuzhiyun 			if (ret < 0)
1438*4882a593Smuzhiyun 				goto done_unlock;
1439*4882a593Smuzhiyun 			else {
1440*4882a593Smuzhiyun 				npkts += mxp;
1441*4882a593Smuzhiyun 				pq->counter += mxp;
1442*4882a593Smuzhiyun 			}
1443*4882a593Smuzhiyun 		}
1444*4882a593Smuzhiyun 	}
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun done_unlock:
1447*4882a593Smuzhiyun 	if (!list_empty(&list))
1448*4882a593Smuzhiyun 		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
1449*4882a593Smuzhiyun 	mutex_unlock(&pq->lock);
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	return (ret < 0) ? ret : npkts;
1452*4882a593Smuzhiyun }
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
1455*4882a593Smuzhiyun 				struct qib_user_sdma_queue *pq)
1456*4882a593Smuzhiyun {
1457*4882a593Smuzhiyun 	int ret = 0;
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	mutex_lock(&pq->lock);
1460*4882a593Smuzhiyun 	qib_user_sdma_hwqueue_clean(ppd);
1461*4882a593Smuzhiyun 	ret = qib_user_sdma_queue_clean(ppd, pq);
1462*4882a593Smuzhiyun 	mutex_unlock(&pq->lock);
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 	return ret;
1465*4882a593Smuzhiyun }
1466*4882a593Smuzhiyun 
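/*
 * Counter accessors: pq->counter advances as packets are queued in
 * qib_user_sdma_writev(), while pq->sent_counter advances as packets
 * complete in qib_user_sdma_queue_clean(), so their difference gives a
 * rough measure of how much work is still in flight.
 */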
1467*4882a593Smuzhiyun u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
1468*4882a593Smuzhiyun {
1469*4882a593Smuzhiyun 	return pq ? pq->sent_counter : 0;
1470*4882a593Smuzhiyun }
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
1473*4882a593Smuzhiyun {
1474*4882a593Smuzhiyun 	return pq ? pq->counter : 0;
1475*4882a593Smuzhiyun }
1476