/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H

/* Implements a simple circular buffer that can optionally be
 * shared between user space and the kernel and can be resized.
 *
 * The requested element size is rounded up to a power of 2,
 * and the number of elements in the buffer is also rounded
 * up to a power of 2. Since the queue is empty when the
 * producer and consumer indices match, the maximum capacity
 * of the queue is one less than the number of element slots;
 * e.g. a request for five elements yields eight slots and a
 * usable capacity of seven.
 */

/* This data structure is shared between user space and kernel
 * space for those cases where the queue is shared. It contains
 * the producer and consumer indices. It also contains a copy
 * of the queue size parameters for user space to use, but the
 * kernel must use the parameters in the rxe_queue struct.
 * This MUST MATCH the corresponding librxe struct.
 * For performance reasons the producer and consumer indices are
 * arranged to fall in separate cache lines.
 * The kernel should always mask the indices to avoid accessing
 * memory outside of the data area.
 */
struct rxe_queue_buf {
	__u32			log2_elem_size;
	__u32			index_mask;
	__u32			pad_1[30];
	__u32			producer_index;
	__u32			pad_2[31];
	__u32			consumer_index;
	__u32			pad_3[31];
	__u8			data[];
};
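
/* The pad_* arrays above are sized so that producer_index starts at
 * byte offset 128 (4 + 4 + 30 * 4) and consumer_index at byte offset
 * 256 (128 + 4 + 31 * 4), placing the two indices in separate cache
 * lines for any cache line size up to 128 bytes.
 */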

/* kernel-private view of one queue. It holds the kernel's own copy
 * of the queue parameters; per the comment above, the kernel must
 * use these rather than the copies in the shared rxe_queue_buf.
 */
struct rxe_queue {
	struct rxe_dev		*rxe;
	struct rxe_queue_buf	*buf;
	struct rxe_mmap_info	*ip;
	size_t			buf_size;
	size_t			elem_size;
	unsigned int		log2_elem_size;
	unsigned int		index_mask;
};

int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p);

void rxe_queue_reset(struct rxe_queue *q);

struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
				 int *num_elem,
				 unsigned int elem_size);

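/* Callers pass both the producer and the consumer lock so that
 * neither index can move while the queue contents are migrated to
 * the resized buffer. (This is inferred from the lock parameters;
 * the implementation lives in rxe_queue.c.)
 */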
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf,
		     /* Protect producers while resizing queue */
		     spinlock_t *producer_lock,
		     /* Protect consumers while resizing queue */
		     spinlock_t *consumer_lock);

void rxe_queue_cleanup(struct rxe_queue *queue);

/* return the circular successor of index */
static inline int next_index(struct rxe_queue *q, int index)
{
	return (index + 1) & q->buf->index_mask;
}

/* the queue is empty when the producer and consumer indices match */
static inline int queue_empty(struct rxe_queue *q)
{
	return ((q->buf->producer_index - q->buf->consumer_index)
			& q->index_mask) == 0;
}

/* the queue is full when producing one more element would make the
 * producer index equal the consumer index, which is indistinguishable
 * from empty; hence the capacity of one less than the slot count
 */
static inline int queue_full(struct rxe_queue *q)
{
	return ((q->buf->producer_index + 1 - q->buf->consumer_index)
			& q->index_mask) == 0;
}

static inline void advance_producer(struct rxe_queue *q)
{
	q->buf->producer_index = (q->buf->producer_index + 1)
			& q->index_mask;
}

static inline void advance_consumer(struct rxe_queue *q)
{
	q->buf->consumer_index = (q->buf->consumer_index + 1)
			& q->index_mask;
}

/* address of the slot that the next produced element will occupy */
static inline void *producer_addr(struct rxe_queue *q)
{
	return q->buf->data + ((q->buf->producer_index & q->index_mask)
				<< q->log2_elem_size);
}
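
/* Illustrative producer-side usage, assuming the caller holds
 * whatever lock serializes producers; "wqe" is a placeholder name:
 *
 *	if (queue_full(q))
 *		return -ENOMEM;
 *	wqe = producer_addr(q);
 *	... fill in the element at wqe ...
 *	advance_producer(q);
 */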

/* address of the slot holding the next element to be consumed */
static inline void *consumer_addr(struct rxe_queue *q)
{
	return q->buf->data + ((q->buf->consumer_index & q->index_mask)
				<< q->log2_elem_size);
}

static inline unsigned int producer_index(struct rxe_queue *q)
{
	return q->buf->producer_index;
}

static inline unsigned int consumer_index(struct rxe_queue *q)
{
	return q->buf->consumer_index;
}

/* translate a queue index into the address of its slot */
static inline void *addr_from_index(struct rxe_queue *q, unsigned int index)
{
	return q->buf->data + ((index & q->index_mask)
				<< q->buf->log2_elem_size);
}

/* translate a slot address back into its queue index; addr must
 * point into the queue's data area
 */
static inline unsigned int index_from_addr(const struct rxe_queue *q,
					   const void *addr)
{
	return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
			& q->index_mask;
}

/* number of elements currently in the queue */
static inline unsigned int queue_count(const struct rxe_queue *q)
{
	return (q->buf->producer_index - q->buf->consumer_index)
			& q->index_mask;
}

/* address of the element at the head of the queue,
 * or NULL if the queue is empty
 */
static inline void *queue_head(struct rxe_queue *q)
{
	return queue_empty(q) ? NULL : consumer_addr(q);
}
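
/* Illustrative consumer-side usage, assuming the caller holds
 * whatever lock serializes consumers; "wqe" is a placeholder name:
 *
 *	wqe = queue_head(q);
 *	if (!wqe)
 *		return;
 *	... process the element at wqe ...
 *	advance_consumer(q);
 */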

#endif /* RXE_QUEUE_H */