/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 */

#ifndef DEF_RDMAVT_INCCQ_H
#define DEF_RDMAVT_INCCQ_H

#include <linux/kthread.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define RVT_CQ_NONE      (IB_CQ_NEXT_COMP + 1)

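/*
 * A sketch of how the sentinel is used (following the logic in
 * rvt_cq_enter(); illustrative, not a verbatim excerpt): cq->notify
 * holds the armed ib_cq_notify_flags value and is reset to
 * RVT_CQ_NONE once the completion handler has been scheduled:
 *
 *	if (cq->notify == IB_CQ_NEXT_COMP ||
 *	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
 *		cq->notify = RVT_CQ_NONE;
 *		... schedule the completion work ...
 *	}
 */
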
/*
 * Define a read macro that applies an smp_load_acquire() memory
 * barrier when reading an index of a circular buffer that is mmapped
 * to user space.
 */
#define RDMA_READ_UAPI_ATOMIC(member) smp_load_acquire(&(member).val)

/*
 * Define a write macro that uses an smp_store_release() memory
 * barrier when writing an index of a circular buffer that is mmapped
 * to user space.
 */
#define RDMA_WRITE_UAPI_ATOMIC(member, x) smp_store_release(&(member).val, x)
#include <rdma/rvt-abi.h>

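/*
 * Illustrative use of the accessors above (a sketch; assumes the
 * struct rvt_cq_wc layout from rvt-abi.h, where head and tail are
 * RDMA_ATOMIC_UINT members shared with user space):
 *
 *	u32 head = RDMA_READ_UAPI_ATOMIC(u_wc->head);
 *	u32 tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);
 *
 *	... fill u_wc->uqueue[head] and compute the next index ...
 *
 *	RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next);
 */
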
/*
 * This structure contains the head index, tail index, and completion
 * queue entries as a single memory allocation. It is the kernel-side
 * counterpart of the user-mappable struct rvt_cq_wc in rvt-abi.h and
 * is used when the CQ is not mmapped into user space.
 */
struct rvt_k_cq_wc {
	u32 head;               /* index of next entry to fill */
	u32 tail;               /* index of next ib_poll_cq() entry */
	struct ib_wc kqueue[];
};

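/*
 * kqueue is used as a classic circular buffer with one extra slot
 * (the allocation holds ibcq.cqe + 1 entries): the queue is empty
 * when head == tail and full when advancing head would land on tail.
 * A sketch of the producer-side index arithmetic, following
 * rvt_cq_enter() (illustrative, not a verbatim excerpt):
 *
 *	u32 next;
 *
 *	if (head >= (unsigned int)cq->ibcq.cqe) {
 *		head = cq->ibcq.cqe;
 *		next = 0;
 *	} else {
 *		next = head + 1;
 *	}
 *	if (next == tail)
 *		... queue is full: flag cq_full, raise IB_EVENT_CQ_ERR ...
 */
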
/*
 * The completion queue structure.
 */
struct rvt_cq {
	struct ib_cq ibcq;
	struct work_struct comptask;	/* deferred completion-handler work */
	spinlock_t lock; /* protect changes in this struct */
	u8 notify;	/* armed ib_cq_notify_flags value, or RVT_CQ_NONE */
	u8 triggered;	/* bumped when a notification fires, to catch re-triggering */
	u8 cq_full;	/* set once when the queue overflows */
	int comp_vector_cpu;	/* CPU the completion work is queued on */
	struct rvt_dev_info *rdi;	/* owning rvt device */
	struct rvt_cq_wc *queue;	/* user-mapped queue (see rvt-abi.h) */
	struct rvt_mmap_info *ip;	/* mmap bookkeeping for @queue */
	struct rvt_k_cq_wc *kqueue;	/* kernel-only queue */
};

static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct rvt_cq, ibcq);
}

/*
 * Add a completion queue entry; @solicited marks the entry as a
 * solicited event. Returns false if the queue was full.
 */
bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited);

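/*
 * Illustrative call site (a sketch; the wqe and qp names are
 * assumptions for the example, not fields defined in this header):
 *
 *	struct ib_wc wc = {
 *		.wr_id = wqe->wr_id,
 *		.status = IB_WC_SUCCESS,
 *		.opcode = IB_WC_SEND,
 *		.qp = &qp->ibqp,
 *	};
 *
 *	if (!rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, false))
 *		... the CQ overflowed; an IB_EVENT_CQ_ERR was raised ...
 */
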
#endif          /* DEF_RDMAVT_INCCQ_H */