/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2018 Intel Corporation
 */

#ifndef _I915_FIXED_H_
#define _I915_FIXED_H_

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/types.h>

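/*
 * Unsigned 16.16 fixed point: the upper 16 bits of @val hold the integer
 * part, the lower 16 bits the fractional part.
 */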
typedef struct {
	u32 val;
} uint_fixed_16_16_t;

#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX })

static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
{
	return val.val == 0;
}

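/* Convert an integer to 16.16; WARNs if it exceeds the 16 integer bits. */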
static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
{
	uint_fixed_16_16_t fp = { .val = val << 16 };

	WARN_ON(val > U16_MAX);

	return fp;
}

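/* Integer part of @fp, with any fractional part rounded up. */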
static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
	return DIV_ROUND_UP(fp.val, 1 << 16);
}

static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
{
	return fp.val >> 16;
}

static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
					     uint_fixed_16_16_t min2)
{
	uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) };

	return min;
}

static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
					     uint_fixed_16_16_t max2)
{
	uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) };

	return max;
}

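/*
 * Narrow a 64-bit intermediate result back to 16.16 fixed point. Note that
 * despite the name this truncates to 32 bits and only WARNs on overflow,
 * it does not saturate.
 */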
static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
{
	uint_fixed_16_16_t fp = { .val = (u32)val };

	WARN_ON(val > U32_MAX);

	return fp;
}

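/* Ratio of two 16.16 values as a plain integer, rounded up. */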
static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
				       uint_fixed_16_16_t d)
{
	return DIV_ROUND_UP(val.val, d.val);
}

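/* Multiply an integer by a 16.16 value, rounding the result up to an integer. */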
static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{
	u64 tmp;

	tmp = mul_u32_u32(val, mul.val);
	tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
	WARN_ON(tmp > U32_MAX);

	return (u32)tmp;
}

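/* 16.16 * 16.16 multiplication, truncating the extra fractional bits. */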
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
					     uint_fixed_16_16_t mul)
{
	u64 tmp;

	tmp = mul_u32_u32(val.val, mul.val);
	tmp = tmp >> 16;

	return clamp_u64_to_fixed16(tmp);
}

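/* Integer division @val / @d as a 16.16 result, rounded up. */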
static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
{
	u64 tmp;

	tmp = (u64)val << 16;
	tmp = DIV_ROUND_UP_ULL(tmp, d);

	return clamp_u64_to_fixed16(tmp);
}

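/* Integer @val divided by a 16.16 value, as a plain integer rounded up. */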
static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
{
	u64 tmp;

	tmp = (u64)val << 16;
	tmp = DIV_ROUND_UP_ULL(tmp, d.val);
	WARN_ON(tmp > U32_MAX);

	return (u32)tmp;
}

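/* Multiply an integer by a 16.16 value; the product stays in 16.16 format. */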
static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{
	u64 tmp;

	tmp = mul_u32_u32(val, mul.val);

	return clamp_u64_to_fixed16(tmp);
}

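/* Add two 16.16 values; overflow is caught by clamp_u64_to_fixed16(). */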
static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
					     uint_fixed_16_16_t add2)
{
	u64 tmp;

	tmp = (u64)add1.val + add2.val;

	return clamp_u64_to_fixed16(tmp);
}

static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
						 u32 add2)
{
	uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2);
	u64 tmp;

	tmp = (u64)add1.val + tmp_add2.val;

	return clamp_u64_to_fixed16(tmp);
}

#endif /* _I915_FIXED_H_ */