xref: /OK3568_Linux_fs/kernel/include/drm/spsc_queue.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright 2017 Advanced Micro Devices, Inc.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a
5*4882a593Smuzhiyun  * copy of this software and associated documentation files (the "Software"),
6*4882a593Smuzhiyun  * to deal in the Software without restriction, including without limitation
7*4882a593Smuzhiyun  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8*4882a593Smuzhiyun  * and/or sell copies of the Software, and to permit persons to whom the
9*4882a593Smuzhiyun  * Software is furnished to do so, subject to the following conditions:
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  * The above copyright notice and this permission notice shall be included in
12*4882a593Smuzhiyun  * all copies or substantial portions of the Software.
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17*4882a593Smuzhiyun  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18*4882a593Smuzhiyun  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19*4882a593Smuzhiyun  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20*4882a593Smuzhiyun  * OTHER DEALINGS IN THE SOFTWARE.
21*4882a593Smuzhiyun  *
22*4882a593Smuzhiyun  */
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #ifndef DRM_SCHEDULER_SPSC_QUEUE_H_
25*4882a593Smuzhiyun #define DRM_SCHEDULER_SPSC_QUEUE_H_
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun #include <linux/atomic.h>
28*4882a593Smuzhiyun #include <linux/preempt.h>
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun /** SPSC lockless queue */
31*4882a593Smuzhiyun 
struct spsc_node {

	/*
	 * Link to the next queued node. Set to NULL by the producer in
	 * spsc_queue_push() before the node is published; the consumer
	 * reads it in spsc_queue_pop() to advance the head.
	 */
	struct spsc_node *next;
};
37*4882a593Smuzhiyun 
struct spsc_queue {

	/* Consumer-side head of the list; NULL when the queue is empty. */
	 struct spsc_node *head;

	/*
	 * Atomically holds a struct spsc_node ** — the address of the slot
	 * the producer must write the next node into: either the last
	 * node's ->next, or &head when the queue is empty. Stored as a
	 * long so the atomic_long_* operations can be used on it.
	 */
	atomic_long_t tail;

	/* Number of nodes currently queued; read via spsc_queue_count(). */
	atomic_t job_count;
};
47*4882a593Smuzhiyun 
spsc_queue_init(struct spsc_queue * queue)48*4882a593Smuzhiyun static inline void spsc_queue_init(struct spsc_queue *queue)
49*4882a593Smuzhiyun {
50*4882a593Smuzhiyun 	queue->head = NULL;
51*4882a593Smuzhiyun 	atomic_long_set(&queue->tail, (long)&queue->head);
52*4882a593Smuzhiyun 	atomic_set(&queue->job_count, 0);
53*4882a593Smuzhiyun }
54*4882a593Smuzhiyun 
spsc_queue_peek(struct spsc_queue * queue)55*4882a593Smuzhiyun static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
56*4882a593Smuzhiyun {
57*4882a593Smuzhiyun 	return queue->head;
58*4882a593Smuzhiyun }
59*4882a593Smuzhiyun 
spsc_queue_count(struct spsc_queue * queue)60*4882a593Smuzhiyun static inline int spsc_queue_count(struct spsc_queue *queue)
61*4882a593Smuzhiyun {
62*4882a593Smuzhiyun 	return atomic_read(&queue->job_count);
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun 
/*
 * Append @node to @queue (producer side).
 *
 * The atomic xchg claims the current tail slot and simultaneously
 * publishes &node->next as the new tail; the subsequent WRITE_ONCE
 * links the node into the old slot. Preemption is disabled so the
 * window between those two steps — during which a racing pop() may
 * busy-wait on node->next — stays short.
 *
 * Returns true if the queue was empty before the push (tail pointed
 * at &queue->head), i.e. the consumer may need waking.
 */
static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
{
	struct spsc_node **tail;

	/* Node must terminate the list until another push links past it. */
	node->next = NULL;

	preempt_disable();

	/* Atomically swap in our next-slot and obtain the previous one. */
	tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
	WRITE_ONCE(*tail, node);
	atomic_inc(&queue->job_count);

	/*
	 * In case of first element verify new node will be visible to the consumer
	 * thread when we ping the kernel thread that there is new work to do.
	 */
	smp_wmb();

	preempt_enable();

	return tail == &queue->head;
}
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 
/*
 * Remove and return the node at the front of @queue, or NULL if the
 * queue is empty (consumer side, single consumer only).
 *
 * Fast path: advance head to node->next. Slow path (popping the last
 * visible node): try to cmpxchg the tail back to &queue->head; if a
 * producer raced us and already moved the tail, spin until that
 * producer's WRITE_ONCE to node->next becomes visible, then take the
 * new node as the head.
 */
static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
{
	struct spsc_node *next, *node;

	/* Pair with the producer's smp_wmb() so its writes are visible. */
	smp_rmb();

	node = READ_ONCE(queue->head);

	if (!node)
		return NULL;

	next = READ_ONCE(node->next);
	WRITE_ONCE(queue->head, next);

	if (unlikely(!next)) {
		/* slowpath for the last element in the queue */

		if (atomic_long_cmpxchg(&queue->tail,
				(long)&node->next, (long) &queue->head) != (long)&node->next) {
			/*
			 * Updating tail failed: a producer already claimed
			 * node->next as the tail slot. Its link write is
			 * imminent (it runs with preemption disabled), so
			 * busy-wait for node->next to appear and make it
			 * the new head.
			 */
			do {
				smp_rmb();
			} while (unlikely(!(queue->head = READ_ONCE(node->next))));
		}
	}

	atomic_dec(&queue->job_count);
	return node;
}
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun #endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */
123