/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */
#ifndef _IBA_DEFS_H_
#define _IBA_DEFS_H_

#include <linux/kernel.h>
#include <linux/bitfield.h>
#include <asm/unaligned.h>
11*4882a593Smuzhiyun
_iba_get8(const u8 * ptr)12*4882a593Smuzhiyun static inline u32 _iba_get8(const u8 *ptr)
13*4882a593Smuzhiyun {
14*4882a593Smuzhiyun return *ptr;
15*4882a593Smuzhiyun }
16*4882a593Smuzhiyun
/*
 * Read-modify-write an 8 bit quantity: clear the bits covered by mask, then
 * merge in prep_value (already positioned by FIELD_PREP).
 */
static inline void _iba_set8(u8 *ptr, u32 mask, u32 prep_value)
{
	u32 cleared = *ptr & ~mask;

	*ptr = cleared | prep_value;
}
21*4882a593Smuzhiyun
_iba_get16(const __be16 * ptr)22*4882a593Smuzhiyun static inline u16 _iba_get16(const __be16 *ptr)
23*4882a593Smuzhiyun {
24*4882a593Smuzhiyun return be16_to_cpu(*ptr);
25*4882a593Smuzhiyun }
26*4882a593Smuzhiyun
/*
 * Read-modify-write a big-endian 16 bit quantity. The mask and prep_value are
 * in CPU order, so the update is done on the host-order value.
 */
static inline void _iba_set16(__be16 *ptr, u16 mask, u16 prep_value)
{
	u16 host = be16_to_cpu(*ptr);

	host = (host & ~mask) | prep_value;
	*ptr = cpu_to_be16(host);
}
31*4882a593Smuzhiyun
_iba_get32(const __be32 * ptr)32*4882a593Smuzhiyun static inline u32 _iba_get32(const __be32 *ptr)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun return be32_to_cpu(*ptr);
35*4882a593Smuzhiyun }
36*4882a593Smuzhiyun
/*
 * Read-modify-write a big-endian 32 bit quantity. The mask and prep_value are
 * in CPU order, so the update is done on the host-order value.
 */
static inline void _iba_set32(__be32 *ptr, u32 mask, u32 prep_value)
{
	u32 host = be32_to_cpu(*ptr);

	host = (host & ~mask) | prep_value;
	*ptr = cpu_to_be32(host);
}
41*4882a593Smuzhiyun
_iba_get64(const __be64 * ptr)42*4882a593Smuzhiyun static inline u64 _iba_get64(const __be64 *ptr)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun /*
45*4882a593Smuzhiyun * The mads are constructed so that 32 bit and smaller are naturally
46*4882a593Smuzhiyun * aligned, everything larger has a max alignment of 4 bytes.
47*4882a593Smuzhiyun */
48*4882a593Smuzhiyun return be64_to_cpu(get_unaligned(ptr));
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun
/*
 * Read-modify-write a big-endian 64 bit quantity, tolerating the 4 byte
 * alignment 64 bit mad fields may have (see _iba_get64()).
 */
static inline void _iba_set64(__be64 *ptr, u64 mask, u64 prep_value)
{
	u64 host = (_iba_get64(ptr) & ~mask) | prep_value;

	put_unaligned(cpu_to_be64(host), ptr);
}
55*4882a593Smuzhiyun
/*
 * Store 'value' into the sub-field described by (field_offset, field_mask).
 * num_bits token-pastes to select the matching _iba_set{8,16,32,64}() helper;
 * field_struct exists only so assigning 'ptr' to _ptr type-checks the caller's
 * pointer. FIELD_PREP() shifts the value into the mask's bit position.
 */
#define _IBA_SET(field_struct, field_offset, field_mask, num_bits, ptr, value) \
	({                                                                     \
		field_struct *_ptr = ptr;                                      \
		_iba_set##num_bits((void *)_ptr + (field_offset), field_mask,  \
				   FIELD_PREP(field_mask, value));             \
	})
/* Public entry point; 'field' expands to the first four _IBA_SET arguments. */
#define IBA_SET(field, ptr, value) _IBA_SET(field, ptr, value)
63*4882a593Smuzhiyun
/*
 * Return a 'type *' pointing at the in-buffer bytes of a multi-byte field.
 * As above, field_struct is only used to type-check 'ptr'.
 */
#define _IBA_GET_MEM_PTR(field_struct, field_offset, type, num_bits, ptr)      \
	({                                                                     \
		field_struct *_ptr = ptr;                                      \
		(type *)((void *)_ptr + (field_offset));                       \
	})
#define IBA_GET_MEM_PTR(field, ptr) _IBA_GET_MEM_PTR(field, ptr)
70*4882a593Smuzhiyun
/*
 * Copy 'bytes' bytes from 'in' into a multi-byte field. Copying nothing when
 * 'in' is NULL or 'bytes' is 0 is deliberate; a short copy leaves the field's
 * trailing bytes untouched (see FIXME). WARN_ON flags copies that would
 * overrun the field as declared by num_bits.
 */
/* FIXME: A set should always set the entire field, meaning we should zero the trailing bytes */
#define _IBA_SET_MEM(field_struct, field_offset, type, num_bits, ptr, in,      \
		     bytes)                                                    \
	({                                                                     \
		const type *_in_ptr = in;                                      \
		WARN_ON(bytes * 8 > num_bits);                                 \
		if (in && bytes)                                               \
			memcpy(_IBA_GET_MEM_PTR(field_struct, field_offset,    \
						type, num_bits, ptr),          \
			       _in_ptr, bytes);                                \
	})
#define IBA_SET_MEM(field, ptr, in, bytes) _IBA_SET_MEM(field, ptr, in, bytes)
83*4882a593Smuzhiyun
/*
 * Extract the sub-field described by (field_offset, field_mask), returned as
 * the unsigned type matching num_bits (u8/u16/u32/u64). Mirror of _IBA_SET.
 */
#define _IBA_GET(field_struct, field_offset, field_mask, num_bits, ptr)        \
	({                                                                     \
		const field_struct *_ptr = ptr;                                \
		(u##num_bits) FIELD_GET(                                       \
			field_mask, _iba_get##num_bits((const void *)_ptr +    \
						       (field_offset)));       \
	})
#define IBA_GET(field, ptr) _IBA_GET(field, ptr)
92*4882a593Smuzhiyun
/*
 * Copy 'bytes' bytes of a multi-byte field into 'out'. Mirror of
 * _IBA_SET_MEM: NULL 'out' or zero 'bytes' copies nothing, and WARN_ON flags
 * reads that would overrun the field as declared by num_bits.
 */
#define _IBA_GET_MEM(field_struct, field_offset, type, num_bits, ptr, out,     \
		     bytes)                                                    \
	({                                                                     \
		type *_out_ptr = out;                                          \
		WARN_ON(bytes * 8 > num_bits);                                 \
		if (out && bytes)                                              \
			memcpy(_out_ptr,                                       \
			       _IBA_GET_MEM_PTR(field_struct, field_offset,    \
						type, num_bits, ptr),          \
			       bytes);                                         \
	})
#define IBA_GET_MEM(field, ptr, out, bytes) _IBA_GET_MEM(field, ptr, out, bytes)
105*4882a593Smuzhiyun
/*
 * The generated list becomes the parameters to the macros, the order is:
 *  - struct this applies to
 *  - starting offset within the mad
 *  - GENMASK or GENMASK_ULL in CPU order
 *  - The width of data the mask operations should work on, in bits
 */
113*4882a593Smuzhiyun
/*
 * Extraction using a tabular description like table 106. bit_offset is from
 * the Byte[Bit] notation, where bit 0 is the most significant bit of the
 * byte — hence the mask is built down from bit 7.
 */
#define IBA_FIELD_BLOC(field_struct, byte_offset, bit_offset, num_bits)        \
	field_struct, byte_offset,                                             \
		GENMASK(7 - (bit_offset), 7 - (bit_offset) - (num_bits - 1)),  \
		8
/* A sub-byte field that starts at bit 0 of its byte. */
#define IBA_FIELD8_LOC(field_struct, byte_offset, num_bits)                    \
	IBA_FIELD_BLOC(field_struct, byte_offset, 0, num_bits)
124*4882a593Smuzhiyun
/*
 * A field inside a 16 bit word: round byte_offset down to the containing
 * 2-byte aligned __be16, then build the CPU-order mask for the sub-field's
 * position within that word.
 */
#define IBA_FIELD16_LOC(field_struct, byte_offset, num_bits)                   \
	field_struct, (byte_offset)&0xFFFE,                                    \
		GENMASK(15 - (((byte_offset) % 2) * 8),                        \
			15 - (((byte_offset) % 2) * 8) - (num_bits - 1)),      \
		16
130*4882a593Smuzhiyun
/*
 * A field inside a 32 bit word: round byte_offset down to the containing
 * 4-byte aligned __be32, then build the CPU-order mask for the sub-field's
 * position within that word.
 */
#define IBA_FIELD32_LOC(field_struct, byte_offset, num_bits)                   \
	field_struct, (byte_offset)&0xFFFC,                                    \
		GENMASK(31 - (((byte_offset) % 4) * 8),                        \
			31 - (((byte_offset) % 4) * 8) - (num_bits - 1)),      \
		32
136*4882a593Smuzhiyun
/* A full 64 bit field; the mask covers the whole quadword. */
#define IBA_FIELD64_LOC(field_struct, byte_offset)                             \
	field_struct, byte_offset, GENMASK_ULL(63, 0), 64
/*
 * In IBTA spec, everything that is more than 64bits is multiple
 * of bytes without leftover bits.
 */
#define IBA_FIELD_MLOC(field_struct, byte_offset, num_bits, type)              \
	field_struct, byte_offset, type, num_bits
145*4882a593Smuzhiyun
#endif /* _IBA_DEFS_H_ */