// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

/* Set up the userspace mmap info for a queue buffer: create the
 * rxe_mmap_info, copy its description to userspace through outbuf and
 * add it to the device's pending mmaps list. For kernel-only queues
 * (outbuf == NULL) this simply returns *ip_p = NULL.
 */
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p)
{
	int err;
	struct rxe_mmap_info *ip = NULL;

	if (outbuf) {
		ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
		if (IS_ERR(ip)) {
			err = PTR_ERR(ip);
			goto err1;
		}

		if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
			err = -EFAULT;
			goto err2;
		}

		spin_lock_bh(&rxe->pending_lock);
		list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
		spin_unlock_bh(&rxe->pending_lock);
	}

	*ip_p = ip;

	return 0;

err2:
	kfree(ip);
err1:
	return err;
}

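/*
 * Illustrative userspace counterpart, not part of this file: the
 * struct mminfo copied out through outbuf above carries an offset/size
 * pair that a userspace provider would pass to mmap() to map the queue
 * buffer. Field names follow rdma_user_rxe.h; "cmd_fd" is a
 * hypothetical uverbs file descriptor and error handling is elided.
 *
 *	struct mminfo mi;	(filled in by the kernel via outbuf)
 *	void *queue_buf = mmap(NULL, mi.size, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, cmd_fd, mi.offset);
 */
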
inline void rxe_queue_reset(struct rxe_queue *q)
{
	/* The queue is composed of a management header followed by the
	 * memory for the queue elements themselves; see struct
	 * rxe_queue_buf in rxe_queue.h. Reset only the element area,
	 * not the header.
	 */
	memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
}

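/*
 * Informational sketch of the buffer laid out by rxe_queue_init()
 * below: a struct rxe_queue_buf header followed by the slot array, so
 * the element area reset above is buf_size - sizeof(struct
 * rxe_queue_buf) bytes long:
 *
 *	+----------------------+------------------------------------+
 *	| struct rxe_queue_buf | num_slots * elem_size bytes (data) |
 *	+----------------------+------------------------------------+
 */
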
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
				 int *num_elem,
				 unsigned int elem_size)
{
	struct rxe_queue *q;
	size_t buf_size;
	unsigned int num_slots;

	/* num_elem == 0 is allowed, but uninteresting */
	if (*num_elem < 0)
		goto err1;

	q = kmalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		goto err1;

	q->rxe = rxe;

	/* used in resize; only the used part of the queue must be copied */
	q->elem_size = elem_size;

	/* pad the element up to at least a cacheline and always to a
	 * power of 2
	 */
	if (elem_size < cache_line_size())
		elem_size = cache_line_size();
	elem_size = roundup_pow_of_two(elem_size);

	q->log2_elem_size = order_base_2(elem_size);

	/* one extra slot distinguishes a full queue from an empty one */
	num_slots = *num_elem + 1;
	num_slots = roundup_pow_of_two(num_slots);
	q->index_mask = num_slots - 1;

	buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;

	q->buf = vmalloc_user(buf_size);
	if (!q->buf)
		goto err2;

	q->buf->log2_elem_size = q->log2_elem_size;
	q->buf->index_mask = q->index_mask;

	q->buf_size = buf_size;

	*num_elem = num_slots - 1;
	return q;

err2:
	kfree(q);
err1:
	return NULL;
}

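/*
 * Why the power-of-two sizing above matters: with num_slots a power of
 * two, producer/consumer indices can wrap with a cheap mask rather
 * than a modulo. A minimal sketch of the idea; the real index helpers
 * live in rxe_queue.h and queue_next_index() here is hypothetical:
 *
 *	static inline u32 queue_next_index(struct rxe_queue *q, u32 index)
 *	{
 *		return (index + 1) & q->index_mask;
 *	}
 */
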
/* Copies elements from the original q to the new q and then swaps the
 * contents of the two queue headers, so that anyone still holding a
 * pointer to q continues to see a valid queue.
 */
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
			 unsigned int num_elem)
{
	if (!queue_empty(q) && (num_elem < queue_count(q)))
		return -EINVAL;

	while (!queue_empty(q)) {
		memcpy(producer_addr(new_q), consumer_addr(q),
		       new_q->elem_size);
		advance_producer(new_q);
		advance_consumer(q);
	}

	swap(*q, *new_q);

	return 0;
}

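/*
 * The num_elem guard above relies on the usual ring-buffer convention,
 * which the extra slot in rxe_queue_init() makes possible: equal
 * indices mean empty, and the masked index difference is the element
 * count. Roughly (assumed shape of the rxe_queue.h helpers):
 *
 *	empty: producer_index == consumer_index
 *	count: (producer_index - consumer_index) & q->index_mask
 */
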
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf, spinlock_t *producer_lock,
		     spinlock_t *consumer_lock)
{
	struct rxe_queue *new_q;
	unsigned int num_elem = *num_elem_p;
	int err;
	unsigned long flags = 0, flags1;

	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size);
	if (!new_q)
		return -ENOMEM;

	err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
			   new_q->buf_size, &new_q->ip);
	if (err) {
		vfree(new_q->buf);
		kfree(new_q);
		goto err1;
	}

	spin_lock_irqsave(consumer_lock, flags1);

	if (producer_lock) {
		spin_lock_irqsave(producer_lock, flags);
		err = resize_finish(q, new_q, num_elem);
		spin_unlock_irqrestore(producer_lock, flags);
	} else {
		err = resize_finish(q, new_q, num_elem);
	}

	spin_unlock_irqrestore(consumer_lock, flags1);

	/* after the swap in resize_finish(), new_q holds the old queue
	 * on success or the unused new queue on error; free whichever
	 * it is
	 */
	rxe_queue_cleanup(new_q);
	if (err)
		goto err1;

	*num_elem_p = num_elem;
	return 0;

err1:
	return err;
}

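/*
 * Hypothetical caller sketch (illustrative only): a completion-queue
 * resize might pass NULL for producer_lock, relying on the NULL branch
 * above, when the producer side is serialized by other means. cq,
 * new_depth, udata and outbuf are assumed to exist in the caller.
 *
 *	unsigned int cqe = new_depth;
 *	int err;
 *
 *	err = rxe_queue_resize(cq->queue, &cqe, sizeof(struct rxe_cqe),
 *			       udata, outbuf, NULL, &cq->cq_lock);
 */
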
void rxe_queue_cleanup(struct rxe_queue *q)
{
	/* a queue mapped to userspace is freed by rxe_mmap_release()
	 * once the last mmap reference is dropped
	 */
	if (q->ip)
		kref_put(&q->ip->ref, rxe_mmap_release);
	else
		vfree(q->buf);

	kfree(q);
}
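
/*
 * Example lifetime (illustrative only): a kernel-internal queue with
 * no userspace mapping. Passing a NULL outbuf to do_mmap_info() just
 * sets q->ip = NULL, so rxe_queue_cleanup() takes the vfree() path.
 * "rxe" and the element type are assumptions borrowed from the QP
 * code.
 *
 *	int num_elem = 64;
 *	struct rxe_queue *q;
 *	int err;
 *
 *	q = rxe_queue_init(rxe, &num_elem, sizeof(struct rxe_send_wqe));
 *	if (!q)
 *		return -ENOMEM;
 *	err = do_mmap_info(rxe, NULL, NULL, q->buf, q->buf_size, &q->ip);
 *	...
 *	rxe_queue_cleanup(q);
 */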