// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_common.h"
#include "ice_flow.h"

/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
};

#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}
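
/* For example, the ICE_FLOW_FIELD_IDX_TCP_SRC_PORT entry below,
 * ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)), describes a
 * 16-bit field at bit offset 0 of the TCP header, since the macro converts
 * its byte-based offset and size arguments to bits.
 */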

/* Table containing properties of supported protocol header fields */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
			  sizeof_field(struct gre_full_hdr, key)),
};

/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Packet types for packets with an Outer/First/Single IPv4 header
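 *
 * Each bit index in these arrays corresponds to a hardware packet type
 * (ptype). ice_flow_proc_seg_hdrs() below treats them as bitmaps of
 * ICE_FLOW_PTYPE_MAX bits and ANDs them into the working ptype bitmap, so
 * only ptypes compatible with every requested header remain set.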
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
static const u32 ice_ipv4_ofos_no_l4[] = {
	0x10C00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ipv4_il_no_l4[] = {
	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
	0x00000008, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
static const u32 ice_ipv6_ofos_no_l4[] = {
	0x00000000, 0x00000000, 0x43000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
static const u32 ice_ipv6_il_no_l4[] = {
	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
	0x00000430, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt;
	struct ice_flow_prof *prof;

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0.
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
};

#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 */
static enum ice_status
ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
	u8 i;

	for (i = 0; i < segs_cnt; i++) {
		/* Multiple L3 headers */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
			return ICE_ERR_PARAM;

		/* Multiple L4 headers */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
			return ICE_ERR_PARAM;
	}

	return 0;
}

/* Sizes of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12

/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
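 *
 * For example, a segment with unencapsulated IPv4 and TCP headers is sized as
 * 14 (MAC) + 20 (IPv4) + 20 (TCP) = 54 bytes, using the fixed header sizes
 * defined above (header options are not accounted for).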
 */
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
{
	u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;

	/* L3 headers */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;

	/* L4 headers */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;

	return sz;
}

/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
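 *
 * The working ptype bitmap starts with all bits set and is then ANDed with
 * the relevant ptype table for each protocol header found, so only packet
 * types compatible with every header in every segment remain set.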
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;

	memset(params->ptypes, 0xff, sizeof(params->ptypes));

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		const unsigned long *src;
		u32 hdrs;

		hdrs = prof->segs[i].hdrs;

		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
		    !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
			src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
				(const unsigned long *)ice_ipv4_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
				(const unsigned long *)ice_ptypes_ipv4_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
			src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
				(const unsigned long *)ice_ipv6_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
				(const unsigned long *)ice_ptypes_ipv6_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
			src = (const unsigned long *)ice_ptypes_udp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
			bitmap_and(params->ptypes, params->ptypes,
				   (const unsigned long *)ice_ptypes_tcp_il,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
			src = (const unsigned long *)ice_ptypes_sctp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			if (!i) {
				src = (const unsigned long *)ice_ptypes_gre_of;
				bitmap_and(params->ptypes, params->ptypes,
					   src, ICE_FLOW_PTYPE_MAX);
			}
		}
	}

	return 0;
}

/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fills the entries with protocol ID and offset information.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld)
{
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 off;

	flds = params->prof->segs[seg].fields;

	switch (fld) {
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	/* Each extraction sequence entry is a word in size, and extracts data
	 * from a protocol header at a word-aligned offset.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	flds[fld].xtrct.prot_id = prot_id;
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
		ICE_FLOW_FV_EXTRACT_SZ;
	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;

	/* Adjust the next field-entry index after accommodating the number of
	 * entries this field consumes
	 */
	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
			   ese_bits);
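
	/* For example, assuming ICE_FLOW_FV_EXTRACT_SZ is 2 bytes (16-bit
	 * extraction words), the 32-bit IPv4 source address at bit offset 96
	 * (byte 12) yields off = 12, disp = 0 and cnt = 2 extraction entries.
	 */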

	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	for (i = 0; i < cnt; i++) {
		u8 idx;

		/* Make sure the number of extraction sequence entries required
		 * does not exceed the block's capability
		 */
		if (params->es_cnt >= fv_words)
			return ICE_ERR_MAX_LIMIT;

		/* some blocks require a reversed field vector layout */
		if (hw->blk[params->blk].es.reverse)
			idx = fv_words - params->es_cnt - 1;
		else
			idx = params->es_cnt;

		params->es[idx].prot_id = prot_id;
		params->es[idx].off = off;
		params->es_cnt++;

		off += ICE_FLOW_FV_EXTRACT_SZ;
	}

	return 0;
}

/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 */
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
		     u8 seg)
{
	u16 fv_words;
	u16 hdrs_sz;
	u8 i;

	if (!params->prof->segs[seg].raws_cnt)
		return 0;

	if (params->prof->segs[seg].raws_cnt >
	    ARRAY_SIZE(params->prof->segs[seg].raws))
		return ICE_ERR_MAX_LIMIT;

	/* Offsets within the segment headers are not supported */
	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
	if (!hdrs_sz)
		return ICE_ERR_PARAM;

	fv_words = hw->blk[params->blk].es.fvw;

	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
		struct ice_flow_seg_fld_raw *raw;
		u16 off, cnt, j;

		raw = &params->prof->segs[seg].raws[i];

		/* Storing extraction information */
		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
			ICE_FLOW_FV_EXTRACT_SZ;
		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
			BITS_PER_BYTE;
		raw->info.xtrct.idx = params->es_cnt;

		/* Determine the number of field vector entries this raw field
		 * consumes.
		 */
		cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
				   (raw->info.src.last * BITS_PER_BYTE),
				   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
		off = raw->info.xtrct.off;
		for (j = 0; j < cnt; j++) {
			u16 idx;

			/* Make sure the number of extraction sequence entries
			 * required does not exceed the block's capability
			 */
			if (params->es_cnt >= hw->blk[params->blk].es.count ||
			    params->es_cnt >= ICE_MAX_FV_WORDS)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = raw->info.xtrct.prot_id;
			params->es[idx].off = off;
			params->es_cnt++;
			off += ICE_FLOW_FV_EXTRACT_SZ;
		}
	}

	return 0;
}

/**
 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 *
 * This function iterates through all matched fields in the given segments, and
 * creates an extraction sequence for the fields.
 */
static enum ice_status
ice_flow_create_xtrct_seq(struct ice_hw *hw,
			  struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof = params->prof;
	enum ice_status status = 0;
	u8 i;

	for (i = 0; i < prof->segs_cnt; i++) {
		u8 j;

		for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
				 ICE_FLOW_FIELD_IDX_MAX) {
			status = ice_flow_xtract_fld(hw, params, i,
						     (enum ice_flow_field)j);
			if (status)
				return status;
		}

		/* Process raw matching bytes */
		status = ice_flow_xtract_raws(hw, params, i);
		if (status)
			return status;
	}

	return status;
}

/**
 * ice_flow_proc_segs - process all packet segments associated with a profile
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 */
static enum ice_status
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
	enum ice_status status;

	status = ice_flow_proc_seg_hdrs(params);
	if (status)
		return status;

	status = ice_flow_create_xtrct_seq(hw, params);
	if (status)
		return status;

	switch (params->blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		status = 0;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	return status;
}

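/* Condition flags for ice_flow_find_prof_conds():
 * ICE_FLOW_FIND_PROF_CHK_FLDS - also require the matched fields to be
 *				 identical in every segment
 * ICE_FLOW_FIND_PROF_CHK_VSI - require the profile to be associated with the
 *				given VSI handle
 * ICE_FLOW_FIND_PROF_NOT_CHK_DIR - ignore the flow direction when comparing
 */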
#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004

/**
 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
 */
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
			 u8 segs_cnt, u16 vsi_handle, u32 conds)
{
	struct ice_flow_prof *p, *prof = NULL;

	mutex_lock(&hw->fl_profs_locks[blk]);
	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
		    segs_cnt && segs_cnt == p->segs_cnt) {
			u8 i;

			/* Check for profile-VSI association if specified */
			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
			    ice_is_vsi_valid(hw, vsi_handle) &&
			    !test_bit(vsi_handle, p->vsis))
				continue;

			/* Protocol headers must be checked. Matched fields are
			 * checked if specified.
			 */
			for (i = 0; i < segs_cnt; i++)
				if (segs[i].hdrs != p->segs[i].hdrs ||
				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
				     segs[i].match != p->segs[i].match))
					break;

			/* A match is found if all segments are matched */
			if (i == segs_cnt) {
				prof = p;
				break;
			}
		}
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return prof;
}

/**
 * ice_flow_find_prof_id - Look up a profile with given profile ID
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: unique ID to identify this flow profile
 */
static struct ice_flow_prof *
ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
	struct ice_flow_prof *p;

	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
		if (p->id == prof_id)
			return p;

	return NULL;
}

/**
 * ice_dealloc_flow_entry - Deallocate flow entry memory
 * @hw: pointer to the HW struct
 * @entry: flow entry to be removed
 */
static void
ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
{
	if (!entry)
		return;

	if (entry->entry)
		devm_kfree(ice_hw_to_dev(hw), entry->entry);

	devm_kfree(ice_hw_to_dev(hw), entry);
}

/**
 * ice_flow_rem_entry_sync - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry: flow entry to be removed
 */
static enum ice_status
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
			struct ice_flow_entry *entry)
{
	if (!entry)
		return ICE_ERR_BAD_PTR;

	list_del(&entry->l_entry);

	ice_dealloc_flow_entry(hw, entry);

	return 0;
}

/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params params;
	enum ice_status status;
	u8 i;

	if (!prof)
		return ICE_ERR_BAD_PTR;

	memset(&params, 0, sizeof(params));
	params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof),
				   GFP_KERNEL);
	if (!params.prof)
		return ICE_ERR_NO_MEMORY;

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params.es[i].prot_id = ICE_PROT_INVALID;
		params.es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params.blk = blk;
	params.prof->id = prof_id;
	params.prof->dir = dir;
	params.prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs));

	status = ice_flow_proc_segs(hw, &params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW,
			  "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params.prof->entries);
	mutex_init(&params.prof->entries_lock);
	*prof = params.prof;

out:
	if (status)
		devm_kfree(ice_hw_to_dev(hw), params.prof);

	return status;
}

/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!list_empty(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		mutex_lock(&prof->entries_lock);

		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		mutex_unlock(&prof->entries_lock);
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		list_del(&prof->l_entry);
		mutex_destroy(&prof->entries_lock);
		devm_kfree(ice_hw_to_dev(hw), prof);
	}

	return status;
}

/**
 * ice_flow_assoc_prof - associate a VSI with a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 */
static enum ice_status
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
		    struct ice_flow_prof *prof, u16 vsi_handle)
{
	enum ice_status status = 0;

	if (!test_bit(vsi_handle, prof->vsis)) {
		status = ice_add_prof_id_flow(hw, blk,
					      ice_get_hw_vsi_num(hw,
								 vsi_handle),
					      prof->id);
		if (!status)
			set_bit(vsi_handle, prof->vsis);
		else
			ice_debug(hw, ICE_DBG_FLOW,
				  "HW profile add failed, %d\n",
				  status);
	}

	return status;
}

/**
 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 */
static enum ice_status
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof, u16 vsi_handle)
{
	enum ice_status status = 0;

	if (test_bit(vsi_handle, prof->vsis)) {
		status = ice_rem_prof_id_flow(hw, blk,
					      ice_get_hw_vsi_num(hw,
								 vsi_handle),
					      prof->id);
		if (!status)
			clear_bit(vsi_handle, prof->vsis);
		else
			ice_debug(hw, ICE_DBG_FLOW,
				  "HW profile remove failed, %d\n",
				  status);
	}

	return status;
}

/**
 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
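 *
 * A minimal usage sketch (assumptions: an Rx-direction RSS profile using the
 * ICE_FLOW_RX direction value from ice_flow.h and a caller-chosen prof_id;
 * matched fields would normally be set on the segment as well, and error
 * handling is omitted):
 *
 *	struct ice_flow_seg_info *segs;
 *	struct ice_flow_prof *prof;
 *	enum ice_status status;
 *
 *	segs = kcalloc(1, sizeof(*segs), GFP_KERNEL);
 *	ICE_FLOW_SET_HDRS(segs, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 *	status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, prof_id,
 *				   segs, 1, &prof);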
 */
enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
		  struct ice_flow_prof **prof)
{
	enum ice_status status;

	if (segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_MAX_LIMIT;

	if (!segs_cnt)
		return ICE_ERR_PARAM;

	if (!segs)
		return ICE_ERR_BAD_PTR;

	status = ice_flow_val_hdrs(segs, segs_cnt);
	if (status)
		return status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
					prof);
	if (!status)
		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);

	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
 * @hw: pointer to the HW struct
 * @blk: the block for which the flow profile is to be removed
 * @prof_id: unique ID of the flow profile to be removed
 */
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
	struct ice_flow_prof *prof;
	enum ice_status status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto out;
	}

	/* prof becomes invalid after the call */
	status = ice_flow_rem_prof_sync(hw, blk, prof);

out:
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	mutex_unlock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	mutex_lock(&prof->entries_lock);
	list_add(&e->l_entry, &prof->entries);
	mutex_unlock(&prof->entries_lock);

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	if (status && e) {
		if (e->entry)
			devm_kfree(ice_hw_to_dev(hw), e->entry);
		devm_kfree(ice_hw_to_dev(hw), e);
	}

	return status;
}

/**
 * ice_flow_rem_entry - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry_h: handle to the flow entry to be removed
 */
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
				   u64 entry_h)
{
	struct ice_flow_entry *entry;
	struct ice_flow_prof *prof;
	enum ice_status status = 0;

	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
		return ICE_ERR_PARAM;

	entry = ICE_FLOW_ENTRY_PTR(entry_h);

	/* Retain the pointer to the flow profile as the entry will be freed */
	prof = entry->prof;

	if (prof) {
		mutex_lock(&prof->entries_lock);
		status = ice_flow_rem_entry_sync(hw, blk, entry);
		mutex_unlock(&prof->entries_lock);
	}

	return status;
}

/**
 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @field_type: type of the field
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 *            input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 *            entry's input buffer
 *
 * This helper function stores information of a field being matched, including
 * the type of the field and the locations of the value to match, the mask, and
 * the upper-bound value in the start of the input buffer for a flow entry.
 * This function should only be used for fixed-size data structures.
 *
 * This function also opportunistically determines the protocol headers to be
 * present based on the fields being set. Some fields cannot be used alone to
 * determine the protocol headers present. Sometimes, fields for particular
 * protocol headers are not matched. In those cases, the protocol headers
 * must be explicitly set.
 */
static void
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		     enum ice_flow_fld_match_type field_type, u16 val_loc,
		     u16 mask_loc, u16 last_loc)
{
	u64 bit = BIT_ULL(fld);

	seg->match |= bit;
	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
		seg->range |= bit;

	seg->fields[fld].type = field_type;
	seg->fields[fld].src.val = val_loc;
	seg->fields[fld].src.mask = mask_loc;
	seg->fields[fld].src.last = last_loc;

	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
}

/**
 * ice_flow_set_fld - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 *            input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 *            entry's input buffer
 * @range: indicate if field being matched is to be in a range
 *
 * This function specifies the locations, in the form of byte offsets from the
 * start of the input buffer for a flow entry, from where the value to match,
 * the mask value, and upper value can be extracted. These locations are then
 * stored in the flow profile. When adding a flow entry associated with the
 * flow profile, these locations will be used to quickly extract the values and
 * create the content of a match entry. This function should only be used for
 * fixed-size data structures.
1109*4882a593Smuzhiyun */
1110*4882a593Smuzhiyun void
1111*4882a593Smuzhiyun ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1112*4882a593Smuzhiyun u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
1113*4882a593Smuzhiyun {
1114*4882a593Smuzhiyun enum ice_flow_fld_match_type t = range ?
1115*4882a593Smuzhiyun ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyun ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
1118*4882a593Smuzhiyun }
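
/* Usage sketch (illustrative only, not part of this file): describe where
 * the IPv4 source and destination addresses live in a caller-owned entry
 * input buffer. "struct ex_flow_buf" and its members are hypothetical; they
 * only show that val_loc/mask_loc/last_loc are byte offsets from the start
 * of that buffer.
 *
 *	struct ex_flow_buf {
 *		__be32 v4_src;
 *		__be32 v4_dst;
 *	};
 *
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 offsetof(struct ex_flow_buf, v4_src),
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 *			 offsetof(struct ex_flow_buf, v4_dst),
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 */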
1119*4882a593Smuzhiyun
1120*4882a593Smuzhiyun /**
1121*4882a593Smuzhiyun * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
1122*4882a593Smuzhiyun * @seg: packet segment the field being set belongs to
1123*4882a593Smuzhiyun * @off: offset of the raw field from the beginning of the segment in bytes
1124*4882a593Smuzhiyun * @len: length of the raw pattern to be matched
1125*4882a593Smuzhiyun * @val_loc: location of the value to match from entry's input buffer
1126*4882a593Smuzhiyun * @mask_loc: location of mask value from entry's input buffer
1127*4882a593Smuzhiyun *
1128*4882a593Smuzhiyun  * This function specifies the offset of the raw field to be matched from the
1129*4882a593Smuzhiyun * beginning of the specified packet segment, and the locations, in the form of
1130*4882a593Smuzhiyun * byte offsets from the start of the input buffer for a flow entry, from where
1131*4882a593Smuzhiyun  * the value to match and the mask value are to be extracted. These locations are
1132*4882a593Smuzhiyun * then stored in the flow profile. When adding flow entries to the associated
1133*4882a593Smuzhiyun * flow profile, these locations can be used to quickly extract the values to
1134*4882a593Smuzhiyun * create the content of a match entry. This function should only be used for
1135*4882a593Smuzhiyun * fixed-size data structures.
1136*4882a593Smuzhiyun */
1137*4882a593Smuzhiyun void
1138*4882a593Smuzhiyun ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
1139*4882a593Smuzhiyun u16 val_loc, u16 mask_loc)
1140*4882a593Smuzhiyun {
1141*4882a593Smuzhiyun if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
1142*4882a593Smuzhiyun seg->raws[seg->raws_cnt].off = off;
1143*4882a593Smuzhiyun seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
1144*4882a593Smuzhiyun seg->raws[seg->raws_cnt].info.src.val = val_loc;
1145*4882a593Smuzhiyun seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
1146*4882a593Smuzhiyun /* The "last" field is used to store the length of the field */
1147*4882a593Smuzhiyun seg->raws[seg->raws_cnt].info.src.last = len;
1148*4882a593Smuzhiyun }
1149*4882a593Smuzhiyun
1150*4882a593Smuzhiyun /* Overflows of "raws" will be handled as an error condition later in
1151*4882a593Smuzhiyun * the flow when this information is processed.
1152*4882a593Smuzhiyun */
1153*4882a593Smuzhiyun seg->raws_cnt++;
1154*4882a593Smuzhiyun }
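
/* Usage sketch (illustrative only): match a 2-byte raw pattern located
 * 4 bytes into the packet segment, with the pattern value and mask stored at
 * caller-chosen byte offsets (here 16 and 18) in the entry's input buffer.
 * The offsets and length are examples, not requirements of this helper.
 *
 *	ice_flow_add_fld_raw(seg, 4, 2, 16, 18);
 */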
1155*4882a593Smuzhiyun
1156*4882a593Smuzhiyun #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
1157*4882a593Smuzhiyun (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
1160*4882a593Smuzhiyun (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
1163*4882a593Smuzhiyun (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
1164*4882a593Smuzhiyun ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
1165*4882a593Smuzhiyun
1166*4882a593Smuzhiyun /**
1167*4882a593Smuzhiyun * ice_flow_set_rss_seg_info - setup packet segments for RSS
1168*4882a593Smuzhiyun * @segs: pointer to the flow field segment(s)
1169*4882a593Smuzhiyun * @hash_fields: fields to be hashed on for the segment(s)
1170*4882a593Smuzhiyun * @flow_hdr: protocol header fields within a packet segment
1171*4882a593Smuzhiyun *
1172*4882a593Smuzhiyun  * Helper function to extract fields from the hash bitmap and use the flow
1173*4882a593Smuzhiyun  * header value to set up a flow field segment for later use when adding or
1174*4882a593Smuzhiyun  * removing a flow profile.
1175*4882a593Smuzhiyun */
1176*4882a593Smuzhiyun static enum ice_status
1177*4882a593Smuzhiyun ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
1178*4882a593Smuzhiyun u32 flow_hdr)
1179*4882a593Smuzhiyun {
1180*4882a593Smuzhiyun u64 val;
1181*4882a593Smuzhiyun u8 i;
1182*4882a593Smuzhiyun
1183*4882a593Smuzhiyun for_each_set_bit(i, (unsigned long *)&hash_fields,
1184*4882a593Smuzhiyun ICE_FLOW_FIELD_IDX_MAX)
1185*4882a593Smuzhiyun ice_flow_set_fld(segs, (enum ice_flow_field)i,
1186*4882a593Smuzhiyun ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1187*4882a593Smuzhiyun ICE_FLOW_FLD_OFF_INVAL, false);
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun ICE_FLOW_SET_HDRS(segs, flow_hdr);
1190*4882a593Smuzhiyun
1191*4882a593Smuzhiyun if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
1192*4882a593Smuzhiyun return ICE_ERR_PARAM;
1193*4882a593Smuzhiyun
1194*4882a593Smuzhiyun val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
1195*4882a593Smuzhiyun if (val && !is_power_of_2(val))
1196*4882a593Smuzhiyun return ICE_ERR_CFG;
1197*4882a593Smuzhiyun
1198*4882a593Smuzhiyun val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
1199*4882a593Smuzhiyun if (val && !is_power_of_2(val))
1200*4882a593Smuzhiyun return ICE_ERR_CFG;
1201*4882a593Smuzhiyun
1202*4882a593Smuzhiyun return 0;
1203*4882a593Smuzhiyun }
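
/* Example of the constraint enforced above (illustrative): hdrs of
 * ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP passes (exactly one L3 and
 * one L4 header bit set), while ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6
 * fails the is_power_of_2() check on the L3 mask and returns ICE_ERR_CFG.
 */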
1204*4882a593Smuzhiyun
1205*4882a593Smuzhiyun /**
1206*4882a593Smuzhiyun * ice_rem_vsi_rss_list - remove VSI from RSS list
1207*4882a593Smuzhiyun * @hw: pointer to the hardware structure
1208*4882a593Smuzhiyun * @vsi_handle: software VSI handle
1209*4882a593Smuzhiyun *
1210*4882a593Smuzhiyun * Remove the VSI from all RSS configurations in the list.
1211*4882a593Smuzhiyun */
1212*4882a593Smuzhiyun void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
1213*4882a593Smuzhiyun {
1214*4882a593Smuzhiyun struct ice_rss_cfg *r, *tmp;
1215*4882a593Smuzhiyun
1216*4882a593Smuzhiyun if (list_empty(&hw->rss_list_head))
1217*4882a593Smuzhiyun return;
1218*4882a593Smuzhiyun
1219*4882a593Smuzhiyun mutex_lock(&hw->rss_locks);
1220*4882a593Smuzhiyun list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1221*4882a593Smuzhiyun if (test_and_clear_bit(vsi_handle, r->vsis))
1222*4882a593Smuzhiyun if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1223*4882a593Smuzhiyun list_del(&r->l_entry);
1224*4882a593Smuzhiyun devm_kfree(ice_hw_to_dev(hw), r);
1225*4882a593Smuzhiyun }
1226*4882a593Smuzhiyun mutex_unlock(&hw->rss_locks);
1227*4882a593Smuzhiyun }
1228*4882a593Smuzhiyun
1229*4882a593Smuzhiyun /**
1230*4882a593Smuzhiyun * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
1231*4882a593Smuzhiyun * @hw: pointer to the hardware structure
1232*4882a593Smuzhiyun * @vsi_handle: software VSI handle
1233*4882a593Smuzhiyun *
1234*4882a593Smuzhiyun  * This function will iterate through all flow profiles and disassociate
1235*4882a593Smuzhiyun  * the VSI from each profile it is associated with. If a flow profile has
1236*4882a593Smuzhiyun  * no VSIs remaining, it will be removed.
1237*4882a593Smuzhiyun */
1238*4882a593Smuzhiyun enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1239*4882a593Smuzhiyun {
1240*4882a593Smuzhiyun const enum ice_block blk = ICE_BLK_RSS;
1241*4882a593Smuzhiyun struct ice_flow_prof *p, *t;
1242*4882a593Smuzhiyun enum ice_status status = 0;
1243*4882a593Smuzhiyun
1244*4882a593Smuzhiyun if (!ice_is_vsi_valid(hw, vsi_handle))
1245*4882a593Smuzhiyun return ICE_ERR_PARAM;
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun if (list_empty(&hw->fl_profs[blk]))
1248*4882a593Smuzhiyun return 0;
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun mutex_lock(&hw->rss_locks);
1251*4882a593Smuzhiyun list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
1252*4882a593Smuzhiyun if (test_bit(vsi_handle, p->vsis)) {
1253*4882a593Smuzhiyun status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
1254*4882a593Smuzhiyun if (status)
1255*4882a593Smuzhiyun break;
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
1258*4882a593Smuzhiyun status = ice_flow_rem_prof(hw, blk, p->id);
1259*4882a593Smuzhiyun if (status)
1260*4882a593Smuzhiyun break;
1261*4882a593Smuzhiyun }
1262*4882a593Smuzhiyun }
1263*4882a593Smuzhiyun mutex_unlock(&hw->rss_locks);
1264*4882a593Smuzhiyun
1265*4882a593Smuzhiyun return status;
1266*4882a593Smuzhiyun }
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun /**
1269*4882a593Smuzhiyun * ice_rem_rss_list - remove RSS configuration from list
1270*4882a593Smuzhiyun * @hw: pointer to the hardware structure
1271*4882a593Smuzhiyun * @vsi_handle: software VSI handle
1272*4882a593Smuzhiyun * @prof: pointer to flow profile
1273*4882a593Smuzhiyun *
1274*4882a593Smuzhiyun * Assumption: lock has already been acquired for RSS list
1275*4882a593Smuzhiyun */
1276*4882a593Smuzhiyun static void
1277*4882a593Smuzhiyun ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1278*4882a593Smuzhiyun {
1279*4882a593Smuzhiyun struct ice_rss_cfg *r, *tmp;
1280*4882a593Smuzhiyun
1281*4882a593Smuzhiyun /* Search for RSS hash fields associated with the VSI that match the
1282*4882a593Smuzhiyun  * hash configuration of the flow profile. If found, clear the VSI from
1283*4882a593Smuzhiyun  * the RSS entry and delete the entry once no VSIs reference it.
1284*4882a593Smuzhiyun */
1285*4882a593Smuzhiyun list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1286*4882a593Smuzhiyun if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1287*4882a593Smuzhiyun r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1288*4882a593Smuzhiyun clear_bit(vsi_handle, r->vsis);
1289*4882a593Smuzhiyun if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1290*4882a593Smuzhiyun list_del(&r->l_entry);
1291*4882a593Smuzhiyun devm_kfree(ice_hw_to_dev(hw), r);
1292*4882a593Smuzhiyun }
1293*4882a593Smuzhiyun return;
1294*4882a593Smuzhiyun }
1295*4882a593Smuzhiyun }
1296*4882a593Smuzhiyun
1297*4882a593Smuzhiyun /**
1298*4882a593Smuzhiyun * ice_add_rss_list - add RSS configuration to list
1299*4882a593Smuzhiyun * @hw: pointer to the hardware structure
1300*4882a593Smuzhiyun * @vsi_handle: software VSI handle
1301*4882a593Smuzhiyun * @prof: pointer to flow profile
1302*4882a593Smuzhiyun *
1303*4882a593Smuzhiyun * Assumption: lock has already been acquired for RSS list
1304*4882a593Smuzhiyun */
1305*4882a593Smuzhiyun static enum ice_status
1306*4882a593Smuzhiyun ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1307*4882a593Smuzhiyun {
1308*4882a593Smuzhiyun struct ice_rss_cfg *r, *rss_cfg;
1309*4882a593Smuzhiyun
1310*4882a593Smuzhiyun list_for_each_entry(r, &hw->rss_list_head, l_entry)
1311*4882a593Smuzhiyun if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1312*4882a593Smuzhiyun r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1313*4882a593Smuzhiyun set_bit(vsi_handle, r->vsis);
1314*4882a593Smuzhiyun return 0;
1315*4882a593Smuzhiyun }
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
1318*4882a593Smuzhiyun GFP_KERNEL);
1319*4882a593Smuzhiyun if (!rss_cfg)
1320*4882a593Smuzhiyun return ICE_ERR_NO_MEMORY;
1321*4882a593Smuzhiyun
1322*4882a593Smuzhiyun rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
1323*4882a593Smuzhiyun rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
1324*4882a593Smuzhiyun set_bit(vsi_handle, rss_cfg->vsis);
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
1327*4882a593Smuzhiyun
1328*4882a593Smuzhiyun return 0;
1329*4882a593Smuzhiyun }
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun #define ICE_FLOW_PROF_HASH_S 0
1332*4882a593Smuzhiyun #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
1333*4882a593Smuzhiyun #define ICE_FLOW_PROF_HDR_S 32
1334*4882a593Smuzhiyun #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
1335*4882a593Smuzhiyun #define ICE_FLOW_PROF_ENCAP_S 63
1336*4882a593Smuzhiyun #define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
1337*4882a593Smuzhiyun
1338*4882a593Smuzhiyun #define ICE_RSS_OUTER_HEADERS 1
1339*4882a593Smuzhiyun #define ICE_RSS_INNER_HEADERS 2
1340*4882a593Smuzhiyun
1341*4882a593Smuzhiyun /* Flow profile ID format:
1342*4882a593Smuzhiyun * [0:31] - Packet match fields
1343*4882a593Smuzhiyun * [32:62] - Protocol header
1344*4882a593Smuzhiyun * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
1345*4882a593Smuzhiyun */
1346*4882a593Smuzhiyun #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
1347*4882a593Smuzhiyun (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
1348*4882a593Smuzhiyun (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
1349*4882a593Smuzhiyun ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
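
/* Worked example of the profile ID layout (values are illustrative):
 * hash = 0x0000000F, hdr = 0x34, segs_cnt = 1 gives
 *	(0x0000000F & ICE_FLOW_PROF_HASH_M) |
 *	(((u64)0x34 << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | 0
 *	= 0x000000340000000FULL
 * With segs_cnt = 2 the same inputs would also set bit 63
 * (ICE_FLOW_PROF_ENCAP_M) to mark the profile as tunneled.
 */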
1350*4882a593Smuzhiyun
1351*4882a593Smuzhiyun /**
1352*4882a593Smuzhiyun * ice_add_rss_cfg_sync - add an RSS configuration
1353*4882a593Smuzhiyun * @hw: pointer to the hardware structure
1354*4882a593Smuzhiyun * @vsi_handle: software VSI handle
1355*4882a593Smuzhiyun * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1356*4882a593Smuzhiyun * @addl_hdrs: protocol header fields
1357*4882a593Smuzhiyun * @segs_cnt: packet segment count
1358*4882a593Smuzhiyun *
1359*4882a593Smuzhiyun * Assumption: lock has already been acquired for RSS list
1360*4882a593Smuzhiyun */
1361*4882a593Smuzhiyun static enum ice_status
1362*4882a593Smuzhiyun ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1363*4882a593Smuzhiyun u32 addl_hdrs, u8 segs_cnt)
1364*4882a593Smuzhiyun {
1365*4882a593Smuzhiyun const enum ice_block blk = ICE_BLK_RSS;
1366*4882a593Smuzhiyun struct ice_flow_prof *prof = NULL;
1367*4882a593Smuzhiyun struct ice_flow_seg_info *segs;
1368*4882a593Smuzhiyun enum ice_status status;
1369*4882a593Smuzhiyun
1370*4882a593Smuzhiyun if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
1371*4882a593Smuzhiyun return ICE_ERR_PARAM;
1372*4882a593Smuzhiyun
1373*4882a593Smuzhiyun segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
1374*4882a593Smuzhiyun if (!segs)
1375*4882a593Smuzhiyun return ICE_ERR_NO_MEMORY;
1376*4882a593Smuzhiyun
1377*4882a593Smuzhiyun /* Construct the packet segment info from the hashed fields */
1378*4882a593Smuzhiyun status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
1379*4882a593Smuzhiyun addl_hdrs);
1380*4882a593Smuzhiyun if (status)
1381*4882a593Smuzhiyun goto exit;
1382*4882a593Smuzhiyun
1383*4882a593Smuzhiyun /* Search for a flow profile that has matching headers, hash fields
1384*4882a593Smuzhiyun  * and has the input VSI associated with it. If found, no further
1385*4882a593Smuzhiyun  * operations are required, so exit.
1386*4882a593Smuzhiyun */
1387*4882a593Smuzhiyun prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1388*4882a593Smuzhiyun vsi_handle,
1389*4882a593Smuzhiyun ICE_FLOW_FIND_PROF_CHK_FLDS |
1390*4882a593Smuzhiyun ICE_FLOW_FIND_PROF_CHK_VSI);
1391*4882a593Smuzhiyun if (prof)
1392*4882a593Smuzhiyun goto exit;
1393*4882a593Smuzhiyun
1394*4882a593Smuzhiyun /* Check if a flow profile exists with the same protocol headers and is
1395*4882a593Smuzhiyun  * associated with the input VSI. If so, disassociate the VSI from
1396*4882a593Smuzhiyun * this profile. The VSI will be added to a new profile created with
1397*4882a593Smuzhiyun * the protocol header and new hash field configuration.
1398*4882a593Smuzhiyun */
1399*4882a593Smuzhiyun prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1400*4882a593Smuzhiyun vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
1401*4882a593Smuzhiyun if (prof) {
1402*4882a593Smuzhiyun status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
1403*4882a593Smuzhiyun if (!status)
1404*4882a593Smuzhiyun ice_rem_rss_list(hw, vsi_handle, prof);
1405*4882a593Smuzhiyun else
1406*4882a593Smuzhiyun goto exit;
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun /* Remove profile if it has no VSIs associated */
1409*4882a593Smuzhiyun if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
1410*4882a593Smuzhiyun status = ice_flow_rem_prof(hw, blk, prof->id);
1411*4882a593Smuzhiyun if (status)
1412*4882a593Smuzhiyun goto exit;
1413*4882a593Smuzhiyun }
1414*4882a593Smuzhiyun }
1415*4882a593Smuzhiyun
1416*4882a593Smuzhiyun /* Search for a profile that has the same match fields only. If one
1417*4882a593Smuzhiyun  * exists, associate the VSI with this profile.
1418*4882a593Smuzhiyun */
1419*4882a593Smuzhiyun prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
1420*4882a593Smuzhiyun vsi_handle,
1421*4882a593Smuzhiyun ICE_FLOW_FIND_PROF_CHK_FLDS);
1422*4882a593Smuzhiyun if (prof) {
1423*4882a593Smuzhiyun status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1424*4882a593Smuzhiyun if (!status)
1425*4882a593Smuzhiyun status = ice_add_rss_list(hw, vsi_handle, prof);
1426*4882a593Smuzhiyun goto exit;
1427*4882a593Smuzhiyun }
1428*4882a593Smuzhiyun
1429*4882a593Smuzhiyun /* Create a new flow profile with the generated profile ID and packet
1430*4882a593Smuzhiyun  * segment information.
1431*4882a593Smuzhiyun */
1432*4882a593Smuzhiyun status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
1433*4882a593Smuzhiyun ICE_FLOW_GEN_PROFID(hashed_flds,
1434*4882a593Smuzhiyun segs[segs_cnt - 1].hdrs,
1435*4882a593Smuzhiyun segs_cnt),
1436*4882a593Smuzhiyun segs, segs_cnt, &prof);
1437*4882a593Smuzhiyun if (status)
1438*4882a593Smuzhiyun goto exit;
1439*4882a593Smuzhiyun
1440*4882a593Smuzhiyun status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1441*4882a593Smuzhiyun /* If association to a new flow profile failed then this profile can
1442*4882a593Smuzhiyun * be removed.
1443*4882a593Smuzhiyun */
1444*4882a593Smuzhiyun if (status) {
1445*4882a593Smuzhiyun ice_flow_rem_prof(hw, blk, prof->id);
1446*4882a593Smuzhiyun goto exit;
1447*4882a593Smuzhiyun }
1448*4882a593Smuzhiyun
1449*4882a593Smuzhiyun status = ice_add_rss_list(hw, vsi_handle, prof);
1450*4882a593Smuzhiyun
1451*4882a593Smuzhiyun exit:
1452*4882a593Smuzhiyun kfree(segs);
1453*4882a593Smuzhiyun return status;
1454*4882a593Smuzhiyun }
1455*4882a593Smuzhiyun
1456*4882a593Smuzhiyun /**
1457*4882a593Smuzhiyun * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
1458*4882a593Smuzhiyun * @hw: pointer to the hardware structure
1459*4882a593Smuzhiyun * @vsi_handle: software VSI handle
1460*4882a593Smuzhiyun * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1461*4882a593Smuzhiyun * @addl_hdrs: protocol header fields
1462*4882a593Smuzhiyun *
1463*4882a593Smuzhiyun  * This function will generate a flow profile based on the input fields to
1464*4882a593Smuzhiyun  * hash on and the additional protocol headers, and use the VSI handle to
1465*4882a593Smuzhiyun  * associate the VSI with the profile.
1466*4882a593Smuzhiyun */
1467*4882a593Smuzhiyun enum ice_status
1468*4882a593Smuzhiyun ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1469*4882a593Smuzhiyun u32 addl_hdrs)
1470*4882a593Smuzhiyun {
1471*4882a593Smuzhiyun enum ice_status status;
1472*4882a593Smuzhiyun
1473*4882a593Smuzhiyun if (hashed_flds == ICE_HASH_INVALID ||
1474*4882a593Smuzhiyun !ice_is_vsi_valid(hw, vsi_handle))
1475*4882a593Smuzhiyun return ICE_ERR_PARAM;
1476*4882a593Smuzhiyun
1477*4882a593Smuzhiyun mutex_lock(&hw->rss_locks);
1478*4882a593Smuzhiyun status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
1479*4882a593Smuzhiyun ICE_RSS_OUTER_HEADERS);
1480*4882a593Smuzhiyun if (!status)
1481*4882a593Smuzhiyun status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
1482*4882a593Smuzhiyun addl_hdrs, ICE_RSS_INNER_HEADERS);
1483*4882a593Smuzhiyun mutex_unlock(&hw->rss_locks);
1484*4882a593Smuzhiyun
1485*4882a593Smuzhiyun return status;
1486*4882a593Smuzhiyun }
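
/* Usage sketch (illustrative, not called from this file): enable 4-tuple
 * RSS hashing for TCP over IPv4 on a VSI. "vsi_handle" stands in for
 * whatever software VSI handle the caller owns; the hash and header flags
 * are the existing flow definitions used elsewhere in this file.
 *
 *	status = ice_add_rss_cfg(hw, vsi_handle,
 *				 ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
 *				 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 */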
1487*4882a593Smuzhiyun
1488*4882a593Smuzhiyun /* Mapping of AVF hash bit fields to an L3-L4 hash combination.
1489*4882a593Smuzhiyun  * As the ice_flow_avf_hdr_field values represent individual bit shifts in a
1490*4882a593Smuzhiyun  * hash, convert them to their appropriate flow L3, L4 values.
1491*4882a593Smuzhiyun */
1492*4882a593Smuzhiyun #define ICE_FLOW_AVF_RSS_IPV4_MASKS \
1493*4882a593Smuzhiyun (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
1494*4882a593Smuzhiyun BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
1495*4882a593Smuzhiyun #define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
1496*4882a593Smuzhiyun (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
1497*4882a593Smuzhiyun BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
1498*4882a593Smuzhiyun #define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
1499*4882a593Smuzhiyun (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
1500*4882a593Smuzhiyun BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
1501*4882a593Smuzhiyun BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
1502*4882a593Smuzhiyun #define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
1503*4882a593Smuzhiyun (ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
1504*4882a593Smuzhiyun ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
1505*4882a593Smuzhiyun
1506*4882a593Smuzhiyun #define ICE_FLOW_AVF_RSS_IPV6_MASKS \
1507*4882a593Smuzhiyun (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
1508*4882a593Smuzhiyun BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
1509*4882a593Smuzhiyun #define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
1510*4882a593Smuzhiyun (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
1511*4882a593Smuzhiyun BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
1512*4882a593Smuzhiyun BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
1513*4882a593Smuzhiyun #define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
1514*4882a593Smuzhiyun (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
1515*4882a593Smuzhiyun BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
1516*4882a593Smuzhiyun #define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
1517*4882a593Smuzhiyun (ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
1518*4882a593Smuzhiyun ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun /**
1521*4882a593Smuzhiyun * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
1522*4882a593Smuzhiyun * @hw: pointer to the hardware structure
1523*4882a593Smuzhiyun * @vsi_handle: software VSI handle
1524*4882a593Smuzhiyun * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
1525*4882a593Smuzhiyun *
1526*4882a593Smuzhiyun * This function will take the hash bitmap provided by the AVF driver via a
1527*4882a593Smuzhiyun * message, convert it to ICE-compatible values, and configure RSS flow
1528*4882a593Smuzhiyun * profiles.
1529*4882a593Smuzhiyun */
1530*4882a593Smuzhiyun enum ice_status
1531*4882a593Smuzhiyun enum ice_status ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
1532*4882a593Smuzhiyun {
1533*4882a593Smuzhiyun enum ice_status status = 0;
1534*4882a593Smuzhiyun u64 hash_flds;
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
1537*4882a593Smuzhiyun !ice_is_vsi_valid(hw, vsi_handle))
1538*4882a593Smuzhiyun return ICE_ERR_PARAM;
1539*4882a593Smuzhiyun
1540*4882a593Smuzhiyun /* Make sure no unsupported bits are specified */
1541*4882a593Smuzhiyun if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
1542*4882a593Smuzhiyun ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
1543*4882a593Smuzhiyun return ICE_ERR_CFG;
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun hash_flds = avf_hash;
1546*4882a593Smuzhiyun
1547*4882a593Smuzhiyun /* Always create an L3 RSS configuration for any L4 RSS configuration */
1548*4882a593Smuzhiyun if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
1549*4882a593Smuzhiyun hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
1550*4882a593Smuzhiyun
1551*4882a593Smuzhiyun if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
1552*4882a593Smuzhiyun hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
1553*4882a593Smuzhiyun
1554*4882a593Smuzhiyun /* Create the corresponding RSS configuration for each valid hash bit */
1555*4882a593Smuzhiyun while (hash_flds) {
1556*4882a593Smuzhiyun u64 rss_hash = ICE_HASH_INVALID;
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
1559*4882a593Smuzhiyun if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
1560*4882a593Smuzhiyun rss_hash = ICE_FLOW_HASH_IPV4;
1561*4882a593Smuzhiyun hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
1562*4882a593Smuzhiyun } else if (hash_flds &
1563*4882a593Smuzhiyun ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
1564*4882a593Smuzhiyun rss_hash = ICE_FLOW_HASH_IPV4 |
1565*4882a593Smuzhiyun ICE_FLOW_HASH_TCP_PORT;
1566*4882a593Smuzhiyun hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
1567*4882a593Smuzhiyun } else if (hash_flds &
1568*4882a593Smuzhiyun ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
1569*4882a593Smuzhiyun rss_hash = ICE_FLOW_HASH_IPV4 |
1570*4882a593Smuzhiyun ICE_FLOW_HASH_UDP_PORT;
1571*4882a593Smuzhiyun hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
1572*4882a593Smuzhiyun } else if (hash_flds &
1573*4882a593Smuzhiyun BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
1574*4882a593Smuzhiyun rss_hash = ICE_FLOW_HASH_IPV4 |
1575*4882a593Smuzhiyun ICE_FLOW_HASH_SCTP_PORT;
1576*4882a593Smuzhiyun hash_flds &=
1577*4882a593Smuzhiyun ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
1578*4882a593Smuzhiyun }
1579*4882a593Smuzhiyun } else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
1580*4882a593Smuzhiyun if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
1581*4882a593Smuzhiyun rss_hash = ICE_FLOW_HASH_IPV6;
1582*4882a593Smuzhiyun hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
1583*4882a593Smuzhiyun } else if (hash_flds &
1584*4882a593Smuzhiyun ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
1585*4882a593Smuzhiyun rss_hash = ICE_FLOW_HASH_IPV6 |
1586*4882a593Smuzhiyun ICE_FLOW_HASH_TCP_PORT;
1587*4882a593Smuzhiyun hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
1588*4882a593Smuzhiyun } else if (hash_flds &
1589*4882a593Smuzhiyun ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
1590*4882a593Smuzhiyun rss_hash = ICE_FLOW_HASH_IPV6 |
1591*4882a593Smuzhiyun ICE_FLOW_HASH_UDP_PORT;
1592*4882a593Smuzhiyun hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
1593*4882a593Smuzhiyun } else if (hash_flds &
1594*4882a593Smuzhiyun BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
1595*4882a593Smuzhiyun rss_hash = ICE_FLOW_HASH_IPV6 |
1596*4882a593Smuzhiyun ICE_FLOW_HASH_SCTP_PORT;
1597*4882a593Smuzhiyun hash_flds &=
1598*4882a593Smuzhiyun ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun }
1601*4882a593Smuzhiyun
1602*4882a593Smuzhiyun if (rss_hash == ICE_HASH_INVALID)
1603*4882a593Smuzhiyun return ICE_ERR_OUT_OF_RANGE;
1604*4882a593Smuzhiyun
1605*4882a593Smuzhiyun status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
1606*4882a593Smuzhiyun ICE_FLOW_SEG_HDR_NONE);
1607*4882a593Smuzhiyun if (status)
1608*4882a593Smuzhiyun break;
1609*4882a593Smuzhiyun }
1610*4882a593Smuzhiyun
1611*4882a593Smuzhiyun return status;
1612*4882a593Smuzhiyun }
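
/* Worked example of the translation above (illustrative): an AVF bitmap of
 * only BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) first has the IPv4 L3 bits ORed
 * in (ICE_FLOW_AVF_RSS_IPV4_MASKS), so the loop produces two RSS
 * configurations, each added via ice_add_rss_cfg() with
 * ICE_FLOW_SEG_HDR_NONE:
 *	1) ICE_FLOW_HASH_IPV4				(L3-only)
 *	2) ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT	(L3 + L4)
 */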
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun /**
1615*4882a593Smuzhiyun * ice_replay_rss_cfg - replay RSS configurations associated with VSI
1616*4882a593Smuzhiyun * @hw: pointer to the hardware structure
1617*4882a593Smuzhiyun * @vsi_handle: software VSI handle
1618*4882a593Smuzhiyun */
1619*4882a593Smuzhiyun enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1620*4882a593Smuzhiyun {
1621*4882a593Smuzhiyun enum ice_status status = 0;
1622*4882a593Smuzhiyun struct ice_rss_cfg *r;
1623*4882a593Smuzhiyun
1624*4882a593Smuzhiyun if (!ice_is_vsi_valid(hw, vsi_handle))
1625*4882a593Smuzhiyun return ICE_ERR_PARAM;
1626*4882a593Smuzhiyun
1627*4882a593Smuzhiyun mutex_lock(&hw->rss_locks);
1628*4882a593Smuzhiyun list_for_each_entry(r, &hw->rss_list_head, l_entry) {
1629*4882a593Smuzhiyun if (test_bit(vsi_handle, r->vsis)) {
1630*4882a593Smuzhiyun status = ice_add_rss_cfg_sync(hw, vsi_handle,
1631*4882a593Smuzhiyun r->hashed_flds,
1632*4882a593Smuzhiyun r->packet_hdr,
1633*4882a593Smuzhiyun ICE_RSS_OUTER_HEADERS);
1634*4882a593Smuzhiyun if (status)
1635*4882a593Smuzhiyun break;
1636*4882a593Smuzhiyun status = ice_add_rss_cfg_sync(hw, vsi_handle,
1637*4882a593Smuzhiyun r->hashed_flds,
1638*4882a593Smuzhiyun r->packet_hdr,
1639*4882a593Smuzhiyun ICE_RSS_INNER_HEADERS);
1640*4882a593Smuzhiyun if (status)
1641*4882a593Smuzhiyun break;
1642*4882a593Smuzhiyun }
1643*4882a593Smuzhiyun }
1644*4882a593Smuzhiyun mutex_unlock(&hw->rss_locks);
1645*4882a593Smuzhiyun
1646*4882a593Smuzhiyun return status;
1647*4882a593Smuzhiyun }
1648*4882a593Smuzhiyun
1649*4882a593Smuzhiyun /**
1650*4882a593Smuzhiyun * ice_get_rss_cfg - returns hashed fields for the given header types
1651*4882a593Smuzhiyun * @hw: pointer to the hardware structure
1652*4882a593Smuzhiyun * @vsi_handle: software VSI handle
1653*4882a593Smuzhiyun * @hdrs: protocol header type
1654*4882a593Smuzhiyun *
1655*4882a593Smuzhiyun  * This function will return the match fields of the first flow profile
1656*4882a593Smuzhiyun  * instance that has the given header types and contains the input VSI
1657*4882a593Smuzhiyun */
1658*4882a593Smuzhiyun u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
1659*4882a593Smuzhiyun {
1660*4882a593Smuzhiyun u64 rss_hash = ICE_HASH_INVALID;
1661*4882a593Smuzhiyun struct ice_rss_cfg *r;
1662*4882a593Smuzhiyun
1663*4882a593Smuzhiyun /* verify that the protocol header is non-zero and the VSI is valid */
1664*4882a593Smuzhiyun if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
1665*4882a593Smuzhiyun return ICE_HASH_INVALID;
1666*4882a593Smuzhiyun
1667*4882a593Smuzhiyun mutex_lock(&hw->rss_locks);
1668*4882a593Smuzhiyun list_for_each_entry(r, &hw->rss_list_head, l_entry)
1669*4882a593Smuzhiyun if (test_bit(vsi_handle, r->vsis) &&
1670*4882a593Smuzhiyun r->packet_hdr == hdrs) {
1671*4882a593Smuzhiyun rss_hash = r->hashed_flds;
1672*4882a593Smuzhiyun break;
1673*4882a593Smuzhiyun }
1674*4882a593Smuzhiyun mutex_unlock(&hw->rss_locks);
1675*4882a593Smuzhiyun
1676*4882a593Smuzhiyun return rss_hash;
1677*4882a593Smuzhiyun }
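
/* Usage sketch (illustrative): query the fields currently hashed for
 * TCP-over-IPv4 traffic on a VSI. The header bitmap must match the stored
 * packet_hdr exactly; ICE_HASH_INVALID is returned when no matching RSS
 * configuration references the VSI.
 *
 *	u64 hash = ice_get_rss_cfg(hw, vsi_handle,
 *				   ICE_FLOW_SEG_HDR_IPV4 |
 *				   ICE_FLOW_SEG_HDR_TCP);
 */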