/*
 * Copyright(c) 2020 - Cornelis Networks, Inc.
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "hfi.h"
#include "sdma.h"
#include "mmu_rb.h"
#include "user_sdma.h"
#include "verbs.h"  /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
#include "trace.h"

static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");

static unsigned initial_pkt_count = 8;

static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
static int pin_vector_pages(struct user_sdma_request *req,
			    struct user_sdma_iovec *iovec);
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned start, unsigned npages);
static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen);
static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen);
static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 len);
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret);
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);

static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *txreq,
	uint seq,
	bool pkts_sent);
static void activate_packet_queue(struct iowait *wait, int reason);
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len);
static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *arg2, bool *stop);
static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);

static struct mmu_rb_ops sdma_rb_ops = {
	.filter = sdma_rb_filter,
	.insert = sdma_rb_insert,
	.evict = sdma_rb_evict,
	.remove = sdma_rb_remove,
	.invalidate = sdma_rb_invalidate
};

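/*
 * iowait "sleep" callback, invoked when the SDMA engine cannot accept the
 * txreq: queue the packet queue on the engine's dmawait list and mark it
 * deferred (-EBUSY), or return -EAGAIN if the engine has made progress and
 * the submission should simply be retried.
 */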
static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *txreq,
	uint seq,
	bool pkts_sent)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);

	write_seqlock(&sde->waitlock);
	if (sdma_progress(sde, seq, txreq))
		goto eagain;
	/*
	 * We are assuming that if the list is enqueued somewhere, it
	 * is to the dmawait list since that is the only place where
	 * it is supposed to be enqueued.
	 */
	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
	if (list_empty(&pq->busy.list)) {
		pq->busy.lock = &sde->waitlock;
		iowait_get_priority(&pq->busy);
		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
	}
	write_sequnlock(&sde->waitlock);
	return -EBUSY;
eagain:
	write_sequnlock(&sde->waitlock);
	return -EAGAIN;
}

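/*
 * iowait "wakeup" callback: the engine can take descriptors again, so mark
 * the packet queue active and wake anyone blocked on its wait_dma queue.
 */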
static void activate_packet_queue(struct iowait *wait, int reason)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	pq->busy.lock = NULL;
	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
	wake_up(&wait->wait_dma);
};

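/*
 * Allocate and initialize the per-(sub)context packet queue (pq) and
 * completion queue (cq) used for user SDMA, and register the pq with the
 * MMU rb-tree handler that caches pinned user buffers.
 */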
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
				struct hfi1_filedata *fd)
{
	int ret = -ENOMEM;
	char buf[64];
	struct hfi1_devdata *dd;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;

	if (!uctxt || !fd)
		return -EBADF;

	if (!hfi1_sdma_comp_ring_size)
		return -EINVAL;

	dd = uctxt->dd;

	pq = kzalloc(sizeof(*pq), GFP_KERNEL);
	if (!pq)
		return -ENOMEM;
	pq->dd = dd;
	pq->ctxt = uctxt->ctxt;
	pq->subctxt = fd->subctxt;
	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
	atomic_set(&pq->n_reqs, 0);
	init_waitqueue_head(&pq->wait);
	atomic_set(&pq->n_locked, 0);

	iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
		    activate_packet_queue, NULL, NULL);
	pq->reqidx = 0;

	pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
			   sizeof(*pq->reqs),
			   GFP_KERNEL);
	if (!pq->reqs)
		goto pq_reqs_nomem;

	pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
				 sizeof(*pq->req_in_use),
				 GFP_KERNEL);
	if (!pq->req_in_use)
		goto pq_reqs_no_in_use;

	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
		 fd->subctxt);
	pq->txreq_cache = kmem_cache_create(buf,
					    sizeof(struct user_sdma_txreq),
					    L1_CACHE_BYTES,
					    SLAB_HWCACHE_ALIGN,
					    NULL);
	if (!pq->txreq_cache) {
		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
			   uctxt->ctxt);
		goto pq_txreq_nomem;
	}

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto cq_nomem;

	cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps)
				 * hfi1_sdma_comp_ring_size));
	if (!cq->comps)
		goto cq_comps_nomem;

	cq->nentries = hfi1_sdma_comp_ring_size;

	ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
				   &pq->handler);
	if (ret) {
		dd_dev_err(dd, "Failed to register with MMU %d", ret);
		goto pq_mmu_fail;
	}

	rcu_assign_pointer(fd->pq, pq);
	fd->cq = cq;

	return 0;

pq_mmu_fail:
	vfree(cq->comps);
cq_comps_nomem:
	kfree(cq);
cq_nomem:
	kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
	kfree(pq->req_in_use);
pq_reqs_no_in_use:
	kfree(pq->reqs);
pq_reqs_nomem:
	kfree(pq);

	return ret;
}

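/* Remove the packet queue from any SDMA engine wait list it is still on. */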
static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
{
	unsigned long flags;
	seqlock_t *lock = pq->busy.lock;

	if (!lock)
		return;
	write_seqlock_irqsave(lock, flags);
	if (!list_empty(&pq->busy.list)) {
		list_del_init(&pq->busy.list);
		pq->busy.lock = NULL;
	}
	write_sequnlock_irqrestore(lock, flags);
}

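/*
 * Tear down the user SDMA state for a file descriptor: unpublish the pq
 * under SRCU, drain and wait for outstanding requests, then free the pq
 * and the completion queue.
 */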
int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
			       struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_user_sdma_pkt_q *pq;

	trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);

	spin_lock(&fd->pq_rcu_lock);
	pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
				    lockdep_is_held(&fd->pq_rcu_lock));
	if (pq) {
		rcu_assign_pointer(fd->pq, NULL);
		spin_unlock(&fd->pq_rcu_lock);
		synchronize_srcu(&fd->pq_srcu);
		/* at this point there can be no more new requests */
		if (pq->handler)
			hfi1_mmu_rb_unregister(pq->handler);
		iowait_sdma_drain(&pq->busy);
		/* Wait until all requests have been freed. */
		wait_event_interruptible(
			pq->wait,
			!atomic_read(&pq->n_reqs));
		kfree(pq->reqs);
		kfree(pq->req_in_use);
		kmem_cache_destroy(pq->txreq_cache);
		flush_pq_iowait(pq);
		kfree(pq);
	} else {
		spin_unlock(&fd->pq_rcu_lock);
	}
	if (fd->cq) {
		vfree(fd->cq->comps);
		kfree(fd->cq);
		fd->cq = NULL;
	}
	return 0;
}

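/*
 * Map a DLID to a small selector value used when picking an SDMA engine.
 * A byte hash of the DLID indexes a lazily filled table, so packets to the
 * same destination keep resolving to the same selector.
 */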
static u8 dlid_to_selector(u16 dlid)
{
	static u8 mapping[256];
	static int initialized;
	static u8 next;
	int hash;

	if (!initialized) {
		memset(mapping, 0xFF, 256);
		initialized = 1;
	}

	hash = ((dlid >> 8) ^ dlid) & 0xFF;
	if (mapping[hash] == 0xFF) {
		mapping[hash] = next;
		next = (next + 1) & 0x7F;
	}

	return mapping[hash];
}

/**
 * hfi1_user_sdma_process_request() - Process and start a user sdma request
 * @fd: valid file descriptor
 * @iovec: array of io vectors to process
 * @dim: overall iovec array size
 * @count: number of io vector array entries processed
 *
 * Return: 0 on success, a negative errno on failure.
 */
int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
				   struct iovec *iovec, unsigned long dim,
				   unsigned long *count)
{
	int ret = 0, i;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq =
		srcu_dereference(fd->pq, &fd->pq_srcu);
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	struct hfi1_devdata *dd = pq->dd;
	unsigned long idx = 0;
	u8 pcount = initial_pkt_count;
	struct sdma_req_info info;
	struct user_sdma_request *req;
	u8 opcode, sc, vl;
	u16 pkey;
	u32 slid;
	u16 dlid;
	u32 selector;

	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
		hfi1_cdbg(
		   SDMA,
		   "[%u:%u:%u] First vector not big enough for header %lu/%lu",
		   dd->unit, uctxt->ctxt, fd->subctxt,
		   iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
		return -EINVAL;
	}
	ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
	if (ret) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
			  dd->unit, uctxt->ctxt, fd->subctxt, ret);
		return -EFAULT;
	}

	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
				     (u16 *)&info);
	if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid comp index",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/*
	 * Sanity check the header io vector count.  Need at least 1 vector
	 * (header) and cannot be larger than the actual io vector count.
	 */
	if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
			  req_iovcnt(info.ctrl), dim);
		return -EINVAL;
	}

	if (!info.fragsize) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Request does not specify fragsize",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/* Try to claim the request. */
	if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
			  dd->unit, uctxt->ctxt, fd->subctxt,
			  info.comp_idx);
		return -EBADSLT;
	}
	/*
	 * All safety checks have been done and this request has been claimed.
	 */
	trace_hfi1_sdma_user_process_request(dd, uctxt->ctxt, fd->subctxt,
					     info.comp_idx);
	req = pq->reqs + info.comp_idx;
	req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
	req->data_len  = 0;
	req->pq = pq;
	req->cq = cq;
	req->ahg_idx = -1;
	req->iov_idx = 0;
	req->sent = 0;
	req->seqnum = 0;
	req->seqcomp = 0;
	req->seqsubmitted = 0;
	req->tids = NULL;
	req->has_error = 0;
	INIT_LIST_HEAD(&req->txps);

	memcpy(&req->info, &info, sizeof(info));

	/* The request is initialized, count it */
	atomic_inc(&pq->n_reqs);

	if (req_opcode(info.ctrl) == EXPECTED) {
		/* expected must have a TID info and at least one data vector */
		if (req->data_iovs < 2) {
			SDMA_DBG(req,
				 "Not enough vectors for expected request");
			ret = -EINVAL;
			goto free_req;
		}
		req->data_iovs--;
	}

	if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
		SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
			 MAX_VECTORS_PER_REQ);
		ret = -EINVAL;
		goto free_req;
	}
	/* Copy the header from the user buffer */
	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
			     sizeof(req->hdr));
	if (ret) {
		SDMA_DBG(req, "Failed to copy header template (%d)", ret);
		ret = -EFAULT;
		goto free_req;
	}

	/* If Static rate control is not enabled, sanitize the header. */
	if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
		req->hdr.pbc[2] = 0;

	/* Validate the opcode. Do not trust packets from user space blindly. */
	opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
	if ((opcode & USER_OPCODE_CHECK_MASK) !=
	     USER_OPCODE_CHECK_VAL) {
		SDMA_DBG(req, "Invalid opcode (%d)", opcode);
		ret = -EINVAL;
		goto free_req;
	}
	/*
	 * Validate the vl. Do not trust packets from user space blindly.
	 * VL comes from PBC, SC comes from LRH, and the VL needs to
	 * match the SC look up.
	 */
	vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
	sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
	      (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
	if (vl >= dd->pport->vls_operational ||
	    vl != sc_to_vlt(dd, sc)) {
		SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
		ret = -EINVAL;
		goto free_req;
	}

	/* Checking P_KEY for requests from user-space */
	pkey = (u16)be32_to_cpu(req->hdr.bth[0]);
	slid = be16_to_cpu(req->hdr.lrh[3]);
	if (egress_pkey_check(dd->pport, slid, pkey, sc, PKEY_CHECK_INVALID)) {
		ret = -EINVAL;
		goto free_req;
	}

	/*
	 * Also should check the BTH.lnh. If it says the next header is GRH then
	 * the RXE parsing will be off and will land in the middle of the KDETH
	 * or miss it entirely.
	 */
	if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
		SDMA_DBG(req, "User tried to pass in a GRH");
		ret = -EINVAL;
		goto free_req;
	}

	req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
	/*
	 * Calculate the initial TID offset based on the values of
	 * KDETH.OFFSET and KDETH.OM that are passed in.
	 */
	req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
		(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
		 KDETH_OM_LARGE : KDETH_OM_SMALL);
	trace_hfi1_sdma_user_initial_tidoffset(dd, uctxt->ctxt, fd->subctxt,
					       info.comp_idx, req->tidoffset);
	idx++;

	/* Save all the IO vector structures */
	for (i = 0; i < req->data_iovs; i++) {
		req->iovs[i].offset = 0;
		INIT_LIST_HEAD(&req->iovs[i].list);
		memcpy(&req->iovs[i].iov,
		       iovec + idx++,
		       sizeof(req->iovs[i].iov));
		ret = pin_vector_pages(req, &req->iovs[i]);
		if (ret) {
			req->data_iovs = i;
			goto free_req;
		}
		req->data_len += req->iovs[i].iov.iov_len;
	}
	trace_hfi1_sdma_user_data_length(dd, uctxt->ctxt, fd->subctxt,
					 info.comp_idx, req->data_len);
	if (pcount > req->info.npkts)
		pcount = req->info.npkts;
	/*
	 * Copy any TID info
	 * User space will provide the TID info only when the
	 * request type is EXPECTED. This is true even if there is
	 * only one packet in the request and the header is already
	 * setup. The reason for the singular TID case is that the
	 * driver needs to perform safety checks.
	 */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
		u32 *tmp;

		if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
			ret = -EINVAL;
			goto free_req;
		}

		/*
		 * We have to copy all of the tids because they may vary
		 * in size and, therefore, the TID count might not be
		 * equal to the pkt count. However, there is no way to
		 * tell at this point.
		 */
		tmp = memdup_user(iovec[idx].iov_base,
				  ntids * sizeof(*req->tids));
		if (IS_ERR(tmp)) {
			ret = PTR_ERR(tmp);
			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
				 ntids, ret);
			goto free_req;
		}
		req->tids = tmp;
		req->n_tids = ntids;
		req->tididx = 0;
		idx++;
	}

	dlid = be16_to_cpu(req->hdr.lrh[1]);
	selector = dlid_to_selector(dlid);
	selector += uctxt->ctxt + fd->subctxt;
	req->sde = sdma_select_user_engine(dd, selector, vl);

	if (!req->sde || !sdma_running(req->sde)) {
		ret = -ECOMM;
		goto free_req;
	}

	/* We don't need an AHG entry if the request contains only one packet */
	if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
		req->ahg_idx = sdma_ahg_alloc(req->sde);

	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
	pq->state = SDMA_PKT_Q_ACTIVE;

	/*
	 * This is a somewhat blocking send implementation.
	 * The driver will block the caller until all packets of the
	 * request have been submitted to the SDMA engine. However, it
	 * will not wait for send completions.
	 */
	while (req->seqsubmitted != req->info.npkts) {
		ret = user_sdma_send_pkts(req, pcount);
		if (ret < 0) {
			if (ret != -EBUSY)
				goto free_req;
			if (wait_event_interruptible_timeout(
				pq->busy.wait_dma,
				pq->state == SDMA_PKT_Q_ACTIVE,
				msecs_to_jiffies(
					SDMA_IOWAIT_TIMEOUT)) <= 0)
				flush_pq_iowait(pq);
		}
	}
	*count += idx;
	return 0;
free_req:
	/*
	 * If seqsubmitted == npkts, the completion routine controls the
	 * final state.  If seqsubmitted < npkts, wait for any outstanding
	 * packets to finish before cleaning up.
	 */
	if (req->seqsubmitted < req->info.npkts) {
		if (req->seqsubmitted)
			wait_event(pq->busy.wait_dma,
				   (req->seqcomp == req->seqsubmitted - 1));
		user_sdma_free_request(req, true);
		pq_update(pq);
		set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
	}
	return ret;
}

static inline u32 compute_data_length(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx)
{
	/*
	 * Determine the proper size of the packet data.
	 * The size of the data of the first packet is in the header
	 * template. However, it includes the header and ICRC, which need
	 * to be subtracted.
	 * The minimum representable packet data length in a header is 4 bytes,
	 * therefore, when the data length request is less than 4 bytes, there's
	 * only one packet, and the packet data length is equal to that of the
	 * request data length.
	 * The size of the remaining packets is the minimum of the frag
	 * size (MTU) or remaining data in the request.
	 */
	u32 len;

	if (!req->seqnum) {
		if (req->data_len < sizeof(u32))
			len = req->data_len;
		else
			len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
			       (sizeof(tx->hdr) - 4));
	} else if (req_opcode(req->info.ctrl) == EXPECTED) {
		u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
			PAGE_SIZE;
		/*
		 * Get the data length based on the remaining space in the
		 * TID pair.
		 */
		len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
		/* If we've filled up the TID pair, move to the next one. */
		if (unlikely(!len) && ++req->tididx < req->n_tids &&
		    req->tids[req->tididx]) {
			tidlen = EXP_TID_GET(req->tids[req->tididx],
					     LEN) * PAGE_SIZE;
			req->tidoffset = 0;
			len = min_t(u32, tidlen, req->info.fragsize);
		}
		/*
		 * Since the TID pairs map entire pages, make sure that we
		 * are not going to try to send more data than we have
		 * remaining.
		 */
		len = min(len, req->data_len - req->sent);
	} else {
		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
	}
	trace_hfi1_sdma_user_compute_length(req->pq->dd,
					    req->pq->ctxt,
					    req->pq->subctxt,
					    req->info.comp_idx,
					    len);
	return len;
}

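/* Round a payload length up to the next multiple of 4 bytes (dword). */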
static inline u32 pad_len(u32 len)
{
	if (len & (sizeof(u32) - 1))
		len += sizeof(u32) - (len & (sizeof(u32) - 1));
	return len;
}

static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
{
	/* (Size of complete header - size of PBC) + 4B ICRC + data length */
	return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
}

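/*
 * Build the first packet of an AHG request: copy the header template into
 * the txreq, patch the PBC packet length if it does not match the computed
 * LRH length, validate the header, and add it as the first descriptor.
 */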
static int user_sdma_txadd_ahg(struct user_sdma_request *req,
			       struct user_sdma_txreq *tx,
			       u32 datalen)
{
	int ret;
	u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
	u32 lrhlen = get_lrh_len(req->hdr, pad_len(datalen));
	struct hfi1_user_sdma_pkt_q *pq = req->pq;

	/*
	 * Copy the request header into the tx header
	 * because the HW needs a cacheline-aligned
	 * address.
	 * This copy can be optimized out if the hdr
	 * member of user_sdma_request were also
	 * cacheline aligned.
	 */
	memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
	if (PBC2LRH(pbclen) != lrhlen) {
		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
		tx->hdr.pbc[0] = cpu_to_le16(pbclen);
	}
	ret = check_header_template(req, &tx->hdr, lrhlen, datalen);
	if (ret)
		return ret;
	ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,
			      sizeof(tx->hdr) + datalen, req->ahg_idx,
			      0, NULL, 0, user_sdma_txreq_cb);
	if (ret)
		return ret;
	ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
	if (ret)
		sdma_txclean(pq->dd, &tx->txreq);
	return ret;
}

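/*
 * Add one page-bounded chunk of payload from the current iovec to the
 * txreq, updating the queued/sent/offset counters and advancing to the
 * next iovec when this one is exhausted.
 */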
static int user_sdma_txadd(struct user_sdma_request *req,
			   struct user_sdma_txreq *tx,
			   struct user_sdma_iovec *iovec, u32 datalen,
			   u32 *queued_ptr, u32 *data_sent_ptr,
			   u64 *iov_offset_ptr)
{
	int ret;
	unsigned int pageidx, len;
	unsigned long base, offset;
	u64 iov_offset = *iov_offset_ptr;
	u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;

	base = (unsigned long)iovec->iov.iov_base;
	offset = offset_in_page(base + iovec->offset + iov_offset);
	pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>
		   PAGE_SHIFT);
	len = offset + req->info.fragsize > PAGE_SIZE ?
		PAGE_SIZE - offset : req->info.fragsize;
	len = min((datalen - queued), len);
	ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
			      offset, len);
	if (ret) {
		SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
		return ret;
	}
	iov_offset += len;
	queued += len;
	data_sent += len;
	if (unlikely(queued < datalen && pageidx == iovec->npages &&
		     req->iov_idx < req->data_iovs - 1)) {
		iovec->offset += iov_offset;
		iovec = &req->iovs[++req->iov_idx];
		iov_offset = 0;
	}

	*queued_ptr = queued;
	*data_sent_ptr = data_sent;
	*iov_offset_ptr = iov_offset;
	return ret;
}

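/*
 * Build and submit up to maxpkts packets of the request to the SDMA
 * engine. Returns 0 or a negative errno; -EBUSY indicates the engine ring
 * is full and the caller should wait for the queue to become active again.
 */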
static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
{
	int ret = 0;
	u16 count;
	unsigned npkts = 0;
	struct user_sdma_txreq *tx = NULL;
	struct hfi1_user_sdma_pkt_q *pq = NULL;
	struct user_sdma_iovec *iovec = NULL;

	if (!req->pq)
		return -EINVAL;

	pq = req->pq;

	/* If tx completion has reported an error, we are done. */
	if (READ_ONCE(req->has_error))
		return -EFAULT;

	/*
	 * Check if we might have sent the entire request already
	 */
	if (unlikely(req->seqnum == req->info.npkts)) {
		if (!list_empty(&req->txps))
			goto dosend;
		return ret;
	}

	if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
		maxpkts = req->info.npkts - req->seqnum;

	while (npkts < maxpkts) {
		u32 datalen = 0, queued = 0, data_sent = 0;
		u64 iov_offset = 0;

		/*
		 * Check whether any of the completions have come back
		 * with errors. If so, we are not going to process any
		 * more packets from this request.
		 */
		if (READ_ONCE(req->has_error))
			return -EFAULT;

		tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
		if (!tx)
			return -ENOMEM;

		tx->flags = 0;
		tx->req = req;
		INIT_LIST_HEAD(&tx->list);

		/*
		 * For the last packet set the ACK request
		 * and disable header suppression.
		 */
		if (req->seqnum == req->info.npkts - 1)
			tx->flags |= (TXREQ_FLAGS_REQ_ACK |
				      TXREQ_FLAGS_REQ_DISABLE_SH);

		/*
		 * Calculate the payload size - this is min of the fragment
		 * (MTU) size or the remaining bytes in the request but only
		 * if we have payload data.
		 */
		if (req->data_len) {
			iovec = &req->iovs[req->iov_idx];
			if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
				if (++req->iov_idx == req->data_iovs) {
					ret = -EFAULT;
					goto free_tx;
				}
				iovec = &req->iovs[req->iov_idx];
				WARN_ON(iovec->offset);
			}

			datalen = compute_data_length(req, tx);

			/*
			 * Disable header suppression for the payload <= 8DWS.
			 * If there is an uncorrectable error in the receive
			 * data FIFO when the received payload size is less
			 * than or equal to 8DWS, then RxDmaDataFifoRdUncErr
			 * is not reported. RHF.EccErr is set if the header
			 * is not suppressed.
			 */
			if (!datalen) {
				SDMA_DBG(req,
					 "Request has data but pkt len is 0");
				ret = -EFAULT;
				goto free_tx;
			} else if (datalen <= 32) {
				tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
			}
		}

		if (req->ahg_idx >= 0) {
			if (!req->seqnum) {
				ret = user_sdma_txadd_ahg(req, tx, datalen);
				if (ret)
					goto free_tx;
			} else {
				int changes;

				changes = set_txreq_header_ahg(req, tx,
							       datalen);
				if (changes < 0) {
					ret = changes;
					goto free_tx;
				}
			}
		} else {
			ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
					  datalen, user_sdma_txreq_cb);
			if (ret)
				goto free_tx;
			/*
			 * Modify the header for this packet. This only needs
			 * to be done if we are not going to use AHG. Otherwise,
			 * the HW will do it based on the changes we gave it
			 * during sdma_txinit_ahg().
			 */
			ret = set_txreq_header(req, tx, datalen);
			if (ret)
				goto free_txreq;
		}

		/*
		 * If the request contains any data vectors, add up to
		 * fragsize bytes to the descriptor.
		 */
		while (queued < datalen &&
		       (req->sent + data_sent) < req->data_len) {
			ret = user_sdma_txadd(req, tx, iovec, datalen,
					      &queued, &data_sent, &iov_offset);
			if (ret)
				goto free_txreq;
		}
		/*
		 * The txreq was submitted successfully so we can update
		 * the counters.
		 */
		req->koffset += datalen;
		if (req_opcode(req->info.ctrl) == EXPECTED)
			req->tidoffset += datalen;
		req->sent += data_sent;
		if (req->data_len)
			iovec->offset += iov_offset;
		list_add_tail(&tx->txreq.list, &req->txps);
		/*
		 * It is important to increment this here as it is used to
		 * generate the BTH.PSN and, therefore, can't be bulk-updated
		 * outside of the loop.
		 */
		tx->seqnum = req->seqnum++;
		npkts++;
	}
dosend:
	ret = sdma_send_txlist(req->sde,
			       iowait_get_ib_work(&pq->busy),
			       &req->txps, &count);
	req->seqsubmitted += count;
	if (req->seqsubmitted == req->info.npkts) {
		/*
		 * The txreq has already been submitted to the HW queue
		 * so we can free the AHG entry now. Corruption will not
		 * happen due to the sequential manner in which
		 * descriptors are processed.
		 */
		if (req->ahg_idx >= 0)
			sdma_ahg_free(req->sde, req->ahg_idx);
	}
	return ret;

free_txreq:
	sdma_txclean(pq->dd, &tx->txreq);
free_tx:
	kmem_cache_free(pq->txreq_cache, tx);
	return ret;
}

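/*
 * Ask the MMU rb-tree cache to evict at least npages of pinned pages;
 * returns the number of pages actually cleared.
 */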
static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
	struct evict_data evict_data;

	evict_data.cleared = 0;
	evict_data.target = npages;
	hfi1_mmu_rb_evict(pq->handler, &evict_data);
	return evict_data.cleared;
}

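/*
 * Pin the additional user pages needed to cover the iovec for this cache
 * node, evicting older cached pins first if the pinned-page limit would
 * be exceeded. Returns the number of newly pinned pages or a negative
 * errno.
 */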
static int pin_sdma_pages(struct user_sdma_request *req,
			  struct user_sdma_iovec *iovec,
			  struct sdma_mmu_node *node,
			  int npages)
{
	int pinned, cleared;
	struct page **pages;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;
	memcpy(pages, node->pages, node->npages * sizeof(*pages));

	npages -= node->npages;
retry:
	if (!hfi1_can_pin_pages(pq->dd, current->mm,
				atomic_read(&pq->n_locked), npages)) {
		cleared = sdma_cache_evict(pq, npages);
		if (cleared >= npages)
			goto retry;
	}
	pinned = hfi1_acquire_user_pages(current->mm,
					 ((unsigned long)iovec->iov.iov_base +
					 (node->npages * PAGE_SIZE)), npages, 0,
					 pages + node->npages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}
	if (pinned != npages) {
		unpin_vector_pages(current->mm, pages, node->npages, pinned);
		return -EFAULT;
	}
	kfree(node->pages);
	node->rb.len = iovec->iov.iov_len;
	node->pages = pages;
	atomic_add(pinned, &pq->n_locked);
	return pinned;
}

static void unpin_sdma_pages(struct sdma_mmu_node *node)
{
	if (node->npages) {
		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
				   node->npages);
		atomic_sub(node->npages, &node->pq->n_locked);
	}
}

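/*
 * Resolve an iovec to pinned pages: reuse a cached sdma_mmu_node when the
 * buffer is already fully pinned, otherwise pin the remaining pages and
 * (re)insert the node into the MMU rb-tree cache.
 */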
static int pin_vector_pages(struct user_sdma_request *req,
			    struct user_sdma_iovec *iovec)
{
	int ret = 0, pinned, npages;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct sdma_mmu_node *node = NULL;
	struct mmu_rb_node *rb_node;
	struct iovec *iov;
	bool extracted;

	extracted =
		hfi1_mmu_rb_remove_unless_exact(pq->handler,
						(unsigned long)
						iovec->iov.iov_base,
						iovec->iov.iov_len, &rb_node);
	if (rb_node) {
		node = container_of(rb_node, struct sdma_mmu_node, rb);
		if (!extracted) {
			atomic_inc(&node->refcount);
			iovec->pages = node->pages;
			iovec->npages = node->npages;
			iovec->node = node;
			return 0;
		}
	}

	if (!node) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		node->rb.addr = (unsigned long)iovec->iov.iov_base;
		node->pq = pq;
		atomic_set(&node->refcount, 0);
	}

	iov = &iovec->iov;
	npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len);
	if (node->npages < npages) {
		pinned = pin_sdma_pages(req, iovec, node, npages);
		if (pinned < 0) {
			ret = pinned;
			goto bail;
		}
		node->npages += pinned;
		npages = node->npages;
	}
	iovec->pages = node->pages;
	iovec->npages = npages;
	iovec->node = node;

	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
	if (ret) {
		iovec->node = NULL;
		goto bail;
	}
	return 0;
bail:
	unpin_sdma_pages(node);
	kfree(node);
	return ret;
}

static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned start, unsigned npages)
{
	hfi1_release_user_pages(mm, pages + start, npages, false);
	kfree(pages);
}

check_header_template(struct user_sdma_request * req,struct hfi1_pkt_header * hdr,u32 lrhlen,u32 datalen)1087*4882a593Smuzhiyun static int check_header_template(struct user_sdma_request *req,
1088*4882a593Smuzhiyun 				 struct hfi1_pkt_header *hdr, u32 lrhlen,
1089*4882a593Smuzhiyun 				 u32 datalen)
1090*4882a593Smuzhiyun {
1091*4882a593Smuzhiyun 	/*
1092*4882a593Smuzhiyun 	 * Perform safety checks for any type of packet:
1093*4882a593Smuzhiyun 	 *    - transfer size is multiple of 64bytes
1094*4882a593Smuzhiyun 	 *    - packet length is multiple of 4 bytes
1095*4882a593Smuzhiyun 	 *    - packet length is not larger than MTU size
1096*4882a593Smuzhiyun 	 *
1097*4882a593Smuzhiyun 	 * These checks are only done for the first packet of the
1098*4882a593Smuzhiyun 	 * transfer since the header is "given" to us by user space.
1099*4882a593Smuzhiyun 	 * For the remainder of the packets we compute the values.
1100*4882a593Smuzhiyun 	 */
1101*4882a593Smuzhiyun 	if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
1102*4882a593Smuzhiyun 	    lrhlen > get_lrh_len(*hdr, req->info.fragsize))
1103*4882a593Smuzhiyun 		return -EINVAL;
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 	if (req_opcode(req->info.ctrl) == EXPECTED) {
1106*4882a593Smuzhiyun 		/*
1107*4882a593Smuzhiyun 		 * The header is checked only on the first packet. Furthermore,
1108*4882a593Smuzhiyun 		 * we ensure that at least one TID entry is copied when the
1109*4882a593Smuzhiyun 		 * request is submitted. Therefore, we don't have to verify that
1110*4882a593Smuzhiyun 		 * tididx points to something sane.
1111*4882a593Smuzhiyun 		 */
1112*4882a593Smuzhiyun 		u32 tidval = req->tids[req->tididx],
1113*4882a593Smuzhiyun 			tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
1114*4882a593Smuzhiyun 			tididx = EXP_TID_GET(tidval, IDX),
1115*4882a593Smuzhiyun 			tidctrl = EXP_TID_GET(tidval, CTRL),
1116*4882a593Smuzhiyun 			tidoff;
1117*4882a593Smuzhiyun 		__le32 kval = hdr->kdeth.ver_tid_offset;
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 		tidoff = KDETH_GET(kval, OFFSET) *
1120*4882a593Smuzhiyun 			  (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
1121*4882a593Smuzhiyun 			   KDETH_OM_LARGE : KDETH_OM_SMALL);
1122*4882a593Smuzhiyun 		/*
1123*4882a593Smuzhiyun 		 * Expected receive packets have the following
1124*4882a593Smuzhiyun 		 * additional checks:
1125*4882a593Smuzhiyun 		 *     - offset is not larger than the TID size
1126*4882a593Smuzhiyun 		 *     - TIDCtrl values match between header and TID array
1127*4882a593Smuzhiyun 		 *     - TID indexes match between header and TID array
1128*4882a593Smuzhiyun 		 */
1129*4882a593Smuzhiyun 		if ((tidoff + datalen > tidlen) ||
1130*4882a593Smuzhiyun 		    KDETH_GET(kval, TIDCTRL) != tidctrl ||
1131*4882a593Smuzhiyun 		    KDETH_GET(kval, TID) != tididx)
1132*4882a593Smuzhiyun 			return -EINVAL;
1133*4882a593Smuzhiyun 	}
1134*4882a593Smuzhiyun 	return 0;
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun /*
1138*4882a593Smuzhiyun  * Correctly set the BTH.PSN field based on the type of
1139*4882a593Smuzhiyun  * transfer: eager packets can simply increment the PSN, but
1140*4882a593Smuzhiyun  * expected packets encode generation and sequence in the
1141*4882a593Smuzhiyun  * BTH.PSN field, so simply incrementing it would result in errors.
1142*4882a593Smuzhiyun  */
1143*4882a593Smuzhiyun static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
1144*4882a593Smuzhiyun {
1145*4882a593Smuzhiyun 	u32 val = be32_to_cpu(bthpsn),
1146*4882a593Smuzhiyun 		mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
1147*4882a593Smuzhiyun 			0xffffffull),
1148*4882a593Smuzhiyun 		psn = val & mask;
1149*4882a593Smuzhiyun 	if (expct)
1150*4882a593Smuzhiyun 		psn = (psn & ~HFI1_KDETH_BTH_SEQ_MASK) |
1151*4882a593Smuzhiyun 			((psn + frags) & HFI1_KDETH_BTH_SEQ_MASK);
1152*4882a593Smuzhiyun 	else
1153*4882a593Smuzhiyun 		psn = psn + frags;
1154*4882a593Smuzhiyun 	return psn & mask;
1155*4882a593Smuzhiyun }
1156*4882a593Smuzhiyun 
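/*
 * Build the header for this packet: copy the request's header template,
 * fix up the PBC/LRH lengths, BTH.PSN and ACK bit, KDETH offset and, for
 * expected receives, the TID fields, then add the header to the tx
 * descriptor list.
 */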
1157*4882a593Smuzhiyun static int set_txreq_header(struct user_sdma_request *req,
1158*4882a593Smuzhiyun 			    struct user_sdma_txreq *tx, u32 datalen)
1159*4882a593Smuzhiyun {
1160*4882a593Smuzhiyun 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
1161*4882a593Smuzhiyun 	struct hfi1_pkt_header *hdr = &tx->hdr;
1162*4882a593Smuzhiyun 	u8 omfactor; /* KDETH.OM */
1163*4882a593Smuzhiyun 	u16 pbclen;
1164*4882a593Smuzhiyun 	int ret;
1165*4882a593Smuzhiyun 	u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	/* Copy the header template to the request before modification */
1168*4882a593Smuzhiyun 	memcpy(hdr, &req->hdr, sizeof(*hdr));
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	/*
1171*4882a593Smuzhiyun 	 * Check whether the PBC and LRH lengths are mismatched. If so,
1172*4882a593Smuzhiyun 	 * adjust both in the header.
1173*4882a593Smuzhiyun 	 */
1174*4882a593Smuzhiyun 	pbclen = le16_to_cpu(hdr->pbc[0]);
1175*4882a593Smuzhiyun 	if (PBC2LRH(pbclen) != lrhlen) {
1176*4882a593Smuzhiyun 		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
1177*4882a593Smuzhiyun 		hdr->pbc[0] = cpu_to_le16(pbclen);
1178*4882a593Smuzhiyun 		hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
1179*4882a593Smuzhiyun 		/*
1180*4882a593Smuzhiyun 		 * Third packet
1181*4882a593Smuzhiyun 		 * This is the first packet in the sequence that has
1182*4882a593Smuzhiyun 		 * a "static" size that can be used for the rest of
1183*4882a593Smuzhiyun 		 * the packets (besides the last one).
1184*4882a593Smuzhiyun 		 */
1185*4882a593Smuzhiyun 		if (unlikely(req->seqnum == 2)) {
1186*4882a593Smuzhiyun 			/*
1187*4882a593Smuzhiyun 			 * From this point on the lengths in both the
1188*4882a593Smuzhiyun 			 * PBC and LRH are the same until the last
1189*4882a593Smuzhiyun 			 * packet.
1190*4882a593Smuzhiyun 			 * Adjust the template so we don't have to update
1191*4882a593Smuzhiyun 			 * every packet.
1192*4882a593Smuzhiyun 			 */
1193*4882a593Smuzhiyun 			req->hdr.pbc[0] = hdr->pbc[0];
1194*4882a593Smuzhiyun 			req->hdr.lrh[2] = hdr->lrh[2];
1195*4882a593Smuzhiyun 		}
1196*4882a593Smuzhiyun 	}
1197*4882a593Smuzhiyun 	/*
1198*4882a593Smuzhiyun 	 * We only have to modify the header if this is not the
1199*4882a593Smuzhiyun 	 * first packet in the request. Otherwise, we use the
1200*4882a593Smuzhiyun 	 * header given to us.
1201*4882a593Smuzhiyun 	 */
1202*4882a593Smuzhiyun 	if (unlikely(!req->seqnum)) {
1203*4882a593Smuzhiyun 		ret = check_header_template(req, hdr, lrhlen, datalen);
1204*4882a593Smuzhiyun 		if (ret)
1205*4882a593Smuzhiyun 			return ret;
1206*4882a593Smuzhiyun 		goto done;
1207*4882a593Smuzhiyun 	}
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	hdr->bth[2] = cpu_to_be32(
1210*4882a593Smuzhiyun 		set_pkt_bth_psn(hdr->bth[2],
1211*4882a593Smuzhiyun 				(req_opcode(req->info.ctrl) == EXPECTED),
1212*4882a593Smuzhiyun 				req->seqnum));
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	/* Set ACK request on last packet */
1215*4882a593Smuzhiyun 	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
1216*4882a593Smuzhiyun 		hdr->bth[2] |= cpu_to_be32(1UL << 31);
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 	/* Set the new offset */
1219*4882a593Smuzhiyun 	hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
1220*4882a593Smuzhiyun 	/* Expected packets have to fill in the new TID information */
1221*4882a593Smuzhiyun 	if (req_opcode(req->info.ctrl) == EXPECTED) {
1222*4882a593Smuzhiyun 		tidval = req->tids[req->tididx];
1223*4882a593Smuzhiyun 		/*
1224*4882a593Smuzhiyun 		 * If the offset puts us at the end of the current TID,
1225*4882a593Smuzhiyun 		 * advance everything.
1226*4882a593Smuzhiyun 		 */
1227*4882a593Smuzhiyun 		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1228*4882a593Smuzhiyun 					 PAGE_SIZE)) {
1229*4882a593Smuzhiyun 			req->tidoffset = 0;
1230*4882a593Smuzhiyun 			/*
1231*4882a593Smuzhiyun 			 * Since we don't copy all the TIDs all at once,
1232*4882a593Smuzhiyun 			 * we have to check again.
1233*4882a593Smuzhiyun 			 */
1234*4882a593Smuzhiyun 			if (++req->tididx > req->n_tids - 1 ||
1235*4882a593Smuzhiyun 			    !req->tids[req->tididx]) {
1236*4882a593Smuzhiyun 				return -EINVAL;
1237*4882a593Smuzhiyun 			}
1238*4882a593Smuzhiyun 			tidval = req->tids[req->tididx];
1239*4882a593Smuzhiyun 		}
1240*4882a593Smuzhiyun 		omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
1241*4882a593Smuzhiyun 			KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT :
1242*4882a593Smuzhiyun 			KDETH_OM_SMALL_SHIFT;
1243*4882a593Smuzhiyun 		/* Set KDETH.TIDCtrl based on value for this TID. */
1244*4882a593Smuzhiyun 		KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
1245*4882a593Smuzhiyun 			  EXP_TID_GET(tidval, CTRL));
1246*4882a593Smuzhiyun 		/* Set KDETH.TID based on value for this TID */
1247*4882a593Smuzhiyun 		KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
1248*4882a593Smuzhiyun 			  EXP_TID_GET(tidval, IDX));
1249*4882a593Smuzhiyun 		/* Clear KDETH.SH when DISABLE_SH flag is set */
1250*4882a593Smuzhiyun 		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
1251*4882a593Smuzhiyun 			KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
1252*4882a593Smuzhiyun 		/*
1253*4882a593Smuzhiyun 		 * Set the KDETH.OFFSET and KDETH.OM based on the size of the
1254*4882a593Smuzhiyun 		 * transfer.
1255*4882a593Smuzhiyun 		 */
1256*4882a593Smuzhiyun 		trace_hfi1_sdma_user_tid_info(
1257*4882a593Smuzhiyun 			pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
1258*4882a593Smuzhiyun 			req->tidoffset, req->tidoffset >> omfactor,
1259*4882a593Smuzhiyun 			omfactor != KDETH_OM_SMALL_SHIFT);
1260*4882a593Smuzhiyun 		KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
1261*4882a593Smuzhiyun 			  req->tidoffset >> omfactor);
1262*4882a593Smuzhiyun 		KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
1263*4882a593Smuzhiyun 			  omfactor != KDETH_OM_SMALL_SHIFT);
1264*4882a593Smuzhiyun 	}
1265*4882a593Smuzhiyun done:
1266*4882a593Smuzhiyun 	trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
1267*4882a593Smuzhiyun 				    req->info.comp_idx, hdr, tidval);
1268*4882a593Smuzhiyun 	return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
1269*4882a593Smuzhiyun }
1270*4882a593Smuzhiyun 
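/*
 * Perform the same per-packet header fix-ups as set_txreq_header(), but
 * express them as an AHG (automatic header generation) update list so
 * only the changed fields are written.  Returns the number of AHG entries
 * used, or a negative value on error.
 */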
1271*4882a593Smuzhiyun static int set_txreq_header_ahg(struct user_sdma_request *req,
1272*4882a593Smuzhiyun 				struct user_sdma_txreq *tx, u32 datalen)
1273*4882a593Smuzhiyun {
1274*4882a593Smuzhiyun 	u32 ahg[AHG_KDETH_ARRAY_SIZE];
1275*4882a593Smuzhiyun 	int idx = 0;
1276*4882a593Smuzhiyun 	u8 omfactor; /* KDETH.OM */
1277*4882a593Smuzhiyun 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
1278*4882a593Smuzhiyun 	struct hfi1_pkt_header *hdr = &req->hdr;
1279*4882a593Smuzhiyun 	u16 pbclen = le16_to_cpu(hdr->pbc[0]);
1280*4882a593Smuzhiyun 	u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
1281*4882a593Smuzhiyun 	size_t array_size = ARRAY_SIZE(ahg);
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	if (PBC2LRH(pbclen) != lrhlen) {
1284*4882a593Smuzhiyun 		/* PBC.PbcLengthDWs */
1285*4882a593Smuzhiyun 		idx = ahg_header_set(ahg, idx, array_size, 0, 0, 12,
1286*4882a593Smuzhiyun 				     (__force u16)cpu_to_le16(LRH2PBC(lrhlen)));
1287*4882a593Smuzhiyun 		if (idx < 0)
1288*4882a593Smuzhiyun 			return idx;
1289*4882a593Smuzhiyun 		/* LRH.PktLen (we need the full 16 bits due to byte swap) */
1290*4882a593Smuzhiyun 		idx = ahg_header_set(ahg, idx, array_size, 3, 0, 16,
1291*4882a593Smuzhiyun 				     (__force u16)cpu_to_be16(lrhlen >> 2));
1292*4882a593Smuzhiyun 		if (idx < 0)
1293*4882a593Smuzhiyun 			return idx;
1294*4882a593Smuzhiyun 	}
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	/*
1297*4882a593Smuzhiyun 	 * Do the common updates
1298*4882a593Smuzhiyun 	 */
1299*4882a593Smuzhiyun 	/* BTH.PSN and BTH.A */
1300*4882a593Smuzhiyun 	val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
1301*4882a593Smuzhiyun 		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
1302*4882a593Smuzhiyun 	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
1303*4882a593Smuzhiyun 		val32 |= 1UL << 31;
1304*4882a593Smuzhiyun 	idx = ahg_header_set(ahg, idx, array_size, 6, 0, 16,
1305*4882a593Smuzhiyun 			     (__force u16)cpu_to_be16(val32 >> 16));
1306*4882a593Smuzhiyun 	if (idx < 0)
1307*4882a593Smuzhiyun 		return idx;
1308*4882a593Smuzhiyun 	idx = ahg_header_set(ahg, idx, array_size, 6, 16, 16,
1309*4882a593Smuzhiyun 			     (__force u16)cpu_to_be16(val32 & 0xffff));
1310*4882a593Smuzhiyun 	if (idx < 0)
1311*4882a593Smuzhiyun 		return idx;
1312*4882a593Smuzhiyun 	/* KDETH.Offset */
1313*4882a593Smuzhiyun 	idx = ahg_header_set(ahg, idx, array_size, 15, 0, 16,
1314*4882a593Smuzhiyun 			     (__force u16)cpu_to_le16(req->koffset & 0xffff));
1315*4882a593Smuzhiyun 	if (idx < 0)
1316*4882a593Smuzhiyun 		return idx;
1317*4882a593Smuzhiyun 	idx = ahg_header_set(ahg, idx, array_size, 15, 16, 16,
1318*4882a593Smuzhiyun 			     (__force u16)cpu_to_le16(req->koffset >> 16));
1319*4882a593Smuzhiyun 	if (idx < 0)
1320*4882a593Smuzhiyun 		return idx;
1321*4882a593Smuzhiyun 	if (req_opcode(req->info.ctrl) == EXPECTED) {
1322*4882a593Smuzhiyun 		__le16 val;
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 		tidval = req->tids[req->tididx];
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 		/*
1327*4882a593Smuzhiyun 		 * If the offset puts us at the end of the current TID,
1328*4882a593Smuzhiyun 		 * advance everything.
1329*4882a593Smuzhiyun 		 */
1330*4882a593Smuzhiyun 		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1331*4882a593Smuzhiyun 					 PAGE_SIZE)) {
1332*4882a593Smuzhiyun 			req->tidoffset = 0;
1333*4882a593Smuzhiyun 			/*
1334*4882a593Smuzhiyun 			 * Since we don't copy all the TIDs all at once,
1335*4882a593Smuzhiyun 			 * we have to check again.
1336*4882a593Smuzhiyun 			 */
1337*4882a593Smuzhiyun 			if (++req->tididx > req->n_tids - 1 ||
1338*4882a593Smuzhiyun 			    !req->tids[req->tididx])
1339*4882a593Smuzhiyun 				return -EINVAL;
1340*4882a593Smuzhiyun 			tidval = req->tids[req->tididx];
1341*4882a593Smuzhiyun 		}
1342*4882a593Smuzhiyun 		omfactor = ((EXP_TID_GET(tidval, LEN) *
1343*4882a593Smuzhiyun 				  PAGE_SIZE) >=
1344*4882a593Smuzhiyun 				 KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
1345*4882a593Smuzhiyun 				 KDETH_OM_SMALL_SHIFT;
1346*4882a593Smuzhiyun 		/* KDETH.OM and KDETH.OFFSET (TID) */
1347*4882a593Smuzhiyun 		idx = ahg_header_set(
1348*4882a593Smuzhiyun 				ahg, idx, array_size, 7, 0, 16,
1349*4882a593Smuzhiyun 				((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
1350*4882a593Smuzhiyun 				((req->tidoffset >> omfactor)
1351*4882a593Smuzhiyun 				& 0x7fff)));
1352*4882a593Smuzhiyun 		if (idx < 0)
1353*4882a593Smuzhiyun 			return idx;
1354*4882a593Smuzhiyun 		/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
1355*4882a593Smuzhiyun 		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
1356*4882a593Smuzhiyun 				   (EXP_TID_GET(tidval, IDX) & 0x3ff));
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
1359*4882a593Smuzhiyun 			val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
1360*4882a593Smuzhiyun 						      INTR) <<
1361*4882a593Smuzhiyun 					    AHG_KDETH_INTR_SHIFT));
1362*4882a593Smuzhiyun 		} else {
1363*4882a593Smuzhiyun 			val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
1364*4882a593Smuzhiyun 			       cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
1365*4882a593Smuzhiyun 			       cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
1366*4882a593Smuzhiyun 						      INTR) <<
1367*4882a593Smuzhiyun 					     AHG_KDETH_INTR_SHIFT));
1368*4882a593Smuzhiyun 		}
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun 		idx = ahg_header_set(ahg, idx, array_size,
1371*4882a593Smuzhiyun 				     7, 16, 14, (__force u16)val);
1372*4882a593Smuzhiyun 		if (idx < 0)
1373*4882a593Smuzhiyun 			return idx;
1374*4882a593Smuzhiyun 	}
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
1377*4882a593Smuzhiyun 					req->info.comp_idx, req->sde->this_idx,
1378*4882a593Smuzhiyun 					req->ahg_idx, ahg, idx, tidval);
1379*4882a593Smuzhiyun 	sdma_txinit_ahg(&tx->txreq,
1380*4882a593Smuzhiyun 			SDMA_TXREQ_F_USE_AHG,
1381*4882a593Smuzhiyun 			datalen, req->ahg_idx, idx,
1382*4882a593Smuzhiyun 			ahg, sizeof(req->hdr),
1383*4882a593Smuzhiyun 			user_sdma_txreq_cb);
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 	return idx;
1386*4882a593Smuzhiyun }
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun /**
1389*4882a593Smuzhiyun  * user_sdma_txreq_cb() - SDMA tx request completion callback.
1390*4882a593Smuzhiyun  * @txreq: valid sdma tx request
1391*4882a593Smuzhiyun  * @status: success/failure of request
1392*4882a593Smuzhiyun  *
1393*4882a593Smuzhiyun  * Called when the SDMA progress state machine gets notification that
1394*4882a593Smuzhiyun  * the SDMA descriptors for this tx request have been processed by the
1395*4882a593Smuzhiyun  * DMA engine. Called in interrupt context.
1396*4882a593Smuzhiyun  * Only do work on completed sequences.
1397*4882a593Smuzhiyun  */
1398*4882a593Smuzhiyun static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
1399*4882a593Smuzhiyun {
1400*4882a593Smuzhiyun 	struct user_sdma_txreq *tx =
1401*4882a593Smuzhiyun 		container_of(txreq, struct user_sdma_txreq, txreq);
1402*4882a593Smuzhiyun 	struct user_sdma_request *req;
1403*4882a593Smuzhiyun 	struct hfi1_user_sdma_pkt_q *pq;
1404*4882a593Smuzhiyun 	struct hfi1_user_sdma_comp_q *cq;
1405*4882a593Smuzhiyun 	enum hfi1_sdma_comp_state state = COMPLETE;
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	if (!tx->req)
1408*4882a593Smuzhiyun 		return;
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	req = tx->req;
1411*4882a593Smuzhiyun 	pq = req->pq;
1412*4882a593Smuzhiyun 	cq = req->cq;
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	if (status != SDMA_TXREQ_S_OK) {
1415*4882a593Smuzhiyun 		SDMA_DBG(req, "SDMA completion with error %d",
1416*4882a593Smuzhiyun 			 status);
1417*4882a593Smuzhiyun 		WRITE_ONCE(req->has_error, 1);
1418*4882a593Smuzhiyun 		state = ERROR;
1419*4882a593Smuzhiyun 	}
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun 	req->seqcomp = tx->seqnum;
1422*4882a593Smuzhiyun 	kmem_cache_free(pq->txreq_cache, tx);
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 	/* If the sequence isn't complete yet, we are done for now */
1425*4882a593Smuzhiyun 	if (req->seqcomp != req->info.npkts - 1)
1426*4882a593Smuzhiyun 		return;
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	user_sdma_free_request(req, false);
1429*4882a593Smuzhiyun 	set_comp_state(pq, cq, req->info.comp_idx, state, status);
1430*4882a593Smuzhiyun 	pq_update(pq);
1431*4882a593Smuzhiyun }
1432*4882a593Smuzhiyun 
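/* Drop the packet queue's request count and wake any waiter when it reaches zero. */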
1433*4882a593Smuzhiyun static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
1434*4882a593Smuzhiyun {
1435*4882a593Smuzhiyun 	if (atomic_dec_and_test(&pq->n_reqs))
1436*4882a593Smuzhiyun 		wake_up(&pq->wait);
1437*4882a593Smuzhiyun }
1438*4882a593Smuzhiyun 
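/*
 * Release a request: free any tx requests still queued on it, drop (or,
 * when @unpin is set, remove from the rb tree) the cached pinned-page
 * nodes of its data iovecs, free the TID array and release the
 * completion slot.
 */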
1439*4882a593Smuzhiyun static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
1440*4882a593Smuzhiyun {
1441*4882a593Smuzhiyun 	int i;
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 	if (!list_empty(&req->txps)) {
1444*4882a593Smuzhiyun 		struct sdma_txreq *t, *p;
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 		list_for_each_entry_safe(t, p, &req->txps, list) {
1447*4882a593Smuzhiyun 			struct user_sdma_txreq *tx =
1448*4882a593Smuzhiyun 				container_of(t, struct user_sdma_txreq, txreq);
1449*4882a593Smuzhiyun 			list_del_init(&t->list);
1450*4882a593Smuzhiyun 			sdma_txclean(req->pq->dd, t);
1451*4882a593Smuzhiyun 			kmem_cache_free(req->pq->txreq_cache, tx);
1452*4882a593Smuzhiyun 		}
1453*4882a593Smuzhiyun 	}
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	for (i = 0; i < req->data_iovs; i++) {
1456*4882a593Smuzhiyun 		struct sdma_mmu_node *node = req->iovs[i].node;
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 		if (!node)
1459*4882a593Smuzhiyun 			continue;
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 		req->iovs[i].node = NULL;
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 		if (unpin)
1464*4882a593Smuzhiyun 			hfi1_mmu_rb_remove(req->pq->handler,
1465*4882a593Smuzhiyun 					   &node->rb);
1466*4882a593Smuzhiyun 		else
1467*4882a593Smuzhiyun 			atomic_dec(&node->refcount);
1468*4882a593Smuzhiyun 	}
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun 	kfree(req->tids);
1471*4882a593Smuzhiyun 	clear_bit(req->info.comp_idx, req->pq->req_in_use);
1472*4882a593Smuzhiyun }
1473*4882a593Smuzhiyun 
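/*
 * Publish the final state of a request to the user-visible completion
 * queue entry; the error code is made visible before the status.
 */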
1474*4882a593Smuzhiyun static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
1475*4882a593Smuzhiyun 				  struct hfi1_user_sdma_comp_q *cq,
1476*4882a593Smuzhiyun 				  u16 idx, enum hfi1_sdma_comp_state state,
1477*4882a593Smuzhiyun 				  int ret)
1478*4882a593Smuzhiyun {
1479*4882a593Smuzhiyun 	if (state == ERROR)
1480*4882a593Smuzhiyun 		cq->comps[idx].errcode = -ret;
1481*4882a593Smuzhiyun 	smp_wmb(); /* make sure errcode is visible first */
1482*4882a593Smuzhiyun 	cq->comps[idx].status = state;
1483*4882a593Smuzhiyun 	trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
1484*4882a593Smuzhiyun 					idx, state, ret);
1485*4882a593Smuzhiyun }
1486*4882a593Smuzhiyun 
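/* Match cached nodes by starting address only; the length is not compared. */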
1487*4882a593Smuzhiyun static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
1488*4882a593Smuzhiyun 			   unsigned long len)
1489*4882a593Smuzhiyun {
1490*4882a593Smuzhiyun 	return (bool)(node->addr == addr);
1491*4882a593Smuzhiyun }
1492*4882a593Smuzhiyun 
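/* rb tree insert callback: take a reference on the newly cached node. */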
1493*4882a593Smuzhiyun static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
1494*4882a593Smuzhiyun {
1495*4882a593Smuzhiyun 	struct sdma_mmu_node *node =
1496*4882a593Smuzhiyun 		container_of(mnode, struct sdma_mmu_node, rb);
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun 	atomic_inc(&node->refcount);
1499*4882a593Smuzhiyun 	return 0;
1500*4882a593Smuzhiyun }
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun /*
1503*4882a593Smuzhiyun  * Return 1 to remove the node from the rb tree and call the remove op.
1504*4882a593Smuzhiyun  *
1505*4882a593Smuzhiyun  * Called with the rb tree lock held.
1506*4882a593Smuzhiyun  */
1507*4882a593Smuzhiyun static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
1508*4882a593Smuzhiyun 			 void *evict_arg, bool *stop)
1509*4882a593Smuzhiyun {
1510*4882a593Smuzhiyun 	struct sdma_mmu_node *node =
1511*4882a593Smuzhiyun 		container_of(mnode, struct sdma_mmu_node, rb);
1512*4882a593Smuzhiyun 	struct evict_data *evict_data = evict_arg;
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	/* is this node still being used? */
1515*4882a593Smuzhiyun 	if (atomic_read(&node->refcount))
1516*4882a593Smuzhiyun 		return 0; /* keep this node */
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	/* this node will be evicted, add its pages to our count */
1519*4882a593Smuzhiyun 	evict_data->cleared += node->npages;
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 	/* have enough pages been cleared? */
1522*4882a593Smuzhiyun 	if (evict_data->cleared >= evict_data->target)
1523*4882a593Smuzhiyun 		*stop = true;
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 	return 1; /* remove this node */
1526*4882a593Smuzhiyun }
1527*4882a593Smuzhiyun 
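/* rb tree remove callback: unpin the node's pages and free the node. */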
1528*4882a593Smuzhiyun static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
1529*4882a593Smuzhiyun {
1530*4882a593Smuzhiyun 	struct sdma_mmu_node *node =
1531*4882a593Smuzhiyun 		container_of(mnode, struct sdma_mmu_node, rb);
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	unpin_sdma_pages(node);
1534*4882a593Smuzhiyun 	kfree(node);
1535*4882a593Smuzhiyun }
1536*4882a593Smuzhiyun 
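/*
 * rb tree invalidate callback: return 1 to remove the node, but only when
 * it is no longer referenced.
 */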
1537*4882a593Smuzhiyun static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
1538*4882a593Smuzhiyun {
1539*4882a593Smuzhiyun 	struct sdma_mmu_node *node =
1540*4882a593Smuzhiyun 		container_of(mnode, struct sdma_mmu_node, rb);
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun 	if (!atomic_read(&node->refcount))
1543*4882a593Smuzhiyun 		return 1;
1544*4882a593Smuzhiyun 	return 0;
1545*4882a593Smuzhiyun }
1546