#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>

#include "liburing.h"

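/*
 * Map the SQ ring, the SQE array, and the CQ ring using the offsets
 * and counts the kernel reported in 'p', then wire up the userspace
 * pointers for both rings. Returns 0 on success or -errno on failure,
 * with any partial mappings torn down.
 */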
static int io_uring_mmap(int fd, struct io_uring_params *p,
			 struct io_uring_sq *sq, struct io_uring_cq *cq)
{
	size_t size;
	void *ptr;
	int ret;

	/* The SQ ring ends with the array of SQE indices. */
	sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
	if (ptr == MAP_FAILED)
		return -errno;
	/* Resolve the SQ ring fields from their kernel-supplied offsets. */
	sq->khead = ptr + p->sq_off.head;
	sq->ktail = ptr + p->sq_off.tail;
	sq->kring_mask = ptr + p->sq_off.ring_mask;
	sq->kring_entries = ptr + p->sq_off.ring_entries;
	sq->kflags = ptr + p->sq_off.flags;
	sq->kdropped = ptr + p->sq_off.dropped;
	sq->array = ptr + p->sq_off.array;

	/* The SQE array is a separate mapping from the SQ ring. */
	size = p->sq_entries * sizeof(struct io_uring_sqe);
	sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd,
				IORING_OFF_SQES);
	if (sq->sqes == MAP_FAILED) {
		ret = -errno;
		/* Also reached via goto below, once the SQEs are unmapped. */
err:
		munmap(sq->khead, sq->ring_sz);
		return ret;
	}

	cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
	ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
	if (ptr == MAP_FAILED) {
		ret = -errno;
		munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
		goto err;
	}
	cq->khead = ptr + p->cq_off.head;
	cq->ktail = ptr + p->cq_off.tail;
	cq->kring_mask = ptr + p->cq_off.ring_mask;
	cq->kring_entries = ptr + p->cq_off.ring_entries;
	cq->koverflow = ptr + p->cq_off.overflow;
	cq->cqes = ptr + p->cq_off.cqes;
	return 0;
}

/*
 * For users that want to specify sq_thread_cpu or sq_thread_idle, this
 * interface is a convenient helper for mmap()ing the rings.
 * Returns a negative error code (-errno) on error, or zero on success.
 * On success, 'ring' contains the necessary information to read/write
 * to the rings.
 */
int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
{
	int ret;

	memset(ring, 0, sizeof(*ring));
	ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
	if (!ret)
		ring->ring_fd = fd;
	return ret;
}

/*
 * Returns a negative value on error, or zero on success. On success,
 * 'ring' contains the necessary information to read/write to the rings.
 */
int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
{
	struct io_uring_params p;
	int fd, ret;

	memset(&p, 0, sizeof(p));
	p.flags = flags;

	fd = io_uring_setup(entries, &p);
	if (fd < 0)
		return fd;

	ret = io_uring_queue_mmap(fd, &p, ring);
	if (ret)
		close(fd);

	return ret;
}

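/*
 * Unmap everything io_uring_mmap() mapped and close the ring fd. The
 * SQE array size is recomputed from the kernel's ring_entries count.
 */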
void io_uring_queue_exit(struct io_uring *ring)
{
	struct io_uring_sq *sq = &ring->sq;
	struct io_uring_cq *cq = &ring->cq;

	munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
	munmap(sq->khead, sq->ring_sz);
	munmap(cq->khead, cq->ring_sz);
	close(ring->ring_fd);
}
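
/*
 * A minimal usage sketch, for illustration only and not part of the
 * original file: set up a ring with the helpers above, then tear it
 * down. The SETUP_USAGE_EXAMPLE guard is a hypothetical name chosen
 * here so the sketch does not affect normal builds; it also assumes
 * the io_uring_setup() syscall wrapper is linked in.
 */
#ifdef SETUP_USAGE_EXAMPLE
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	int ret;

	/* Queue depth of 32; no flags, so no SQPOLL thread is started. */
	ret = io_uring_queue_init(32, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "io_uring_queue_init failed: %d\n", ret);
		return 1;
	}

	/* Submissions/completions would go here, via ring.sq and ring.cq. */

	io_uring_queue_exit(&ring);
	return 0;
}
#endif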