xref: /OK3568_Linux_fs/kernel/fs/btrfs/misc.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun 
3*4882a593Smuzhiyun #ifndef BTRFS_MISC_H
4*4882a593Smuzhiyun #define BTRFS_MISC_H
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/sched.h>
7*4882a593Smuzhiyun #include <linux/wait.h>
8*4882a593Smuzhiyun #include <asm/div64.h>
9*4882a593Smuzhiyun #include <linux/rbtree.h>
10*4882a593Smuzhiyun 
/*
 * True iff @b lies in the half-open range [@first, @first + @len).
 * NOTE(review): arguments are evaluated more than once — do not pass
 * expressions with side effects.
 */
#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
12*4882a593Smuzhiyun 
/*
 * Wake up waiters on @wq only when there is actually somebody waiting.
 *
 * wq_has_sleeper() implies a full smp_mb barrier; see the comments for
 * waitqueue_active for why the barrier is required.
 */
static inline void cond_wake_up(struct wait_queue_head *wq)
{
	if (!wq_has_sleeper(wq))
		return;
	wake_up(wq);
}
22*4882a593Smuzhiyun 
/*
 * Conditional wakeup without the implied memory barrier.
 *
 * Use only when the barrier that waitqueue_active requires is already
 * provided by preceding code — e.g. an atomic RMW operation
 * (atomic_dec_and_return, ...) or an unlock/lock sequence.
 */
static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	if (!waitqueue_active(wq))
		return;
	wake_up(wq);
}
34*4882a593Smuzhiyun 
/*
 * Scale @num by @factor tenths, i.e. return num * factor / 10.
 * A factor of 10 returns @num unchanged without doing the division.
 * NOTE(review): the multiplication is done in 64 bits, so very large
 * @num values can wrap — callers pass block-group sizes well below that.
 */
static inline u64 div_factor(u64 num, int factor)
{
	if (factor != 10)
		num = div_u64(num * factor, 10);
	return num;
}
42*4882a593Smuzhiyun 
/*
 * Scale @num by @factor percent, i.e. return num * factor / 100.
 * A factor of 100 returns @num unchanged without doing the division.
 */
static inline u64 div_factor_fine(u64 num, int factor)
{
	if (factor != 100)
		num = div_u64(num * factor, 100);
	return num;
}
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun /* Copy of is_power_of_two that is 64bit safe */
is_power_of_two_u64(u64 n)52*4882a593Smuzhiyun static inline bool is_power_of_two_u64(u64 n)
53*4882a593Smuzhiyun {
54*4882a593Smuzhiyun 	return n != 0 && (n & (n - 1)) == 0;
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun 
/*
 * True iff exactly one bit of @n is set — an alias of is_power_of_two_u64()
 * kept for call sites where "single bit" reads better than "power of two".
 */
static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun /*
63*4882a593Smuzhiyun  * Simple bytenr based rb_tree relate structures
64*4882a593Smuzhiyun  *
65*4882a593Smuzhiyun  * Any structure wants to use bytenr as single search index should have their
66*4882a593Smuzhiyun  * structure start with these members.
67*4882a593Smuzhiyun  */
struct rb_simple_node {
	/* Must stay first so rb_entry() from a plain rb_node is valid. */
	struct rb_node rb_node;
	/* Single search key for the tree. */
	u64 bytenr;
};
72*4882a593Smuzhiyun 
/*
 * Look up the node whose bytenr exactly matches @bytenr.
 *
 * Returns the matching rb_node, or NULL when no entry with that bytenr
 * exists in @root.
 */
static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *cur = root->rb_node;

	while (cur) {
		struct rb_simple_node *cand;

		cand = rb_entry(cur, struct rb_simple_node, rb_node);
		if (cand->bytenr == bytenr)
			return cur;
		/* Descend left for smaller keys, right for larger ones. */
		cur = (bytenr < cand->bytenr) ? cur->rb_left : cur->rb_right;
	}
	return NULL;
}
90*4882a593Smuzhiyun 
/*
 * Insert @node keyed by @bytenr into @root.
 *
 * Returns NULL on successful insertion, or the already-present rb_node
 * with the same bytenr, in which case @node is NOT linked into the tree.
 */
static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
					       struct rb_node *node)
{
	struct rb_node **link = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*link) {
		struct rb_simple_node *cur;

		parent = *link;
		cur = rb_entry(parent, struct rb_simple_node, rb_node);
		if (cur->bytenr == bytenr)
			return parent;
		if (bytenr < cur->bytenr)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(node, parent, link);
	rb_insert_color(node, root);
	return NULL;
}
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun #endif
116