1*4882a593Smuzhiyun /*******************************************************************************
2*4882a593Smuzhiyun *
3*4882a593Smuzhiyun * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * This software is available to you under a choice of one of two
6*4882a593Smuzhiyun * licenses. You may choose to be licensed under the terms of the GNU
7*4882a593Smuzhiyun * General Public License (GPL) Version 2, available from the file
8*4882a593Smuzhiyun * COPYING in the main directory of this source tree, or the
9*4882a593Smuzhiyun * OpenFabrics.org BSD license below:
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * Redistribution and use in source and binary forms, with or
12*4882a593Smuzhiyun * without modification, are permitted provided that the following
13*4882a593Smuzhiyun * conditions are met:
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * - Redistributions of source code must retain the above
16*4882a593Smuzhiyun * copyright notice, this list of conditions and the following
17*4882a593Smuzhiyun * disclaimer.
18*4882a593Smuzhiyun *
19*4882a593Smuzhiyun * - Redistributions in binary form must reproduce the above
20*4882a593Smuzhiyun * copyright notice, this list of conditions and the following
21*4882a593Smuzhiyun * disclaimer in the documentation and/or other materials
22*4882a593Smuzhiyun * provided with the distribution.
23*4882a593Smuzhiyun *
24*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25*4882a593Smuzhiyun * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26*4882a593Smuzhiyun * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27*4882a593Smuzhiyun * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28*4882a593Smuzhiyun * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29*4882a593Smuzhiyun * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30*4882a593Smuzhiyun * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31*4882a593Smuzhiyun * SOFTWARE.
32*4882a593Smuzhiyun *
33*4882a593Smuzhiyun *******************************************************************************/
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #include "i40iw_osdep.h"
36*4882a593Smuzhiyun #include "i40iw_register.h"
37*4882a593Smuzhiyun #include "i40iw_status.h"
38*4882a593Smuzhiyun #include "i40iw_hmc.h"
39*4882a593Smuzhiyun #include "i40iw_d.h"
40*4882a593Smuzhiyun #include "i40iw_type.h"
41*4882a593Smuzhiyun #include "i40iw_p.h"
42*4882a593Smuzhiyun #include "i40iw_vf.h"
43*4882a593Smuzhiyun #include "i40iw_virtchnl.h"
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun /**
46*4882a593Smuzhiyun * i40iw_find_sd_index_limit - finds segment descriptor index limit
47*4882a593Smuzhiyun * @hmc_info: pointer to the HMC configuration information structure
48*4882a593Smuzhiyun * @type: type of HMC resources we're searching
49*4882a593Smuzhiyun * @index: starting index for the object
50*4882a593Smuzhiyun * @cnt: number of objects we're trying to create
51*4882a593Smuzhiyun * @sd_idx: pointer to return index of the segment descriptor in question
52*4882a593Smuzhiyun * @sd_limit: pointer to return the maximum number of segment descriptors
53*4882a593Smuzhiyun *
54*4882a593Smuzhiyun * This function calculates the segment descriptor index and index limit
55*4882a593Smuzhiyun * for the resource defined by i40iw_hmc_rsrc_type.
56*4882a593Smuzhiyun */
57*4882a593Smuzhiyun
i40iw_find_sd_index_limit(struct i40iw_hmc_info * hmc_info,u32 type,u32 idx,u32 cnt,u32 * sd_idx,u32 * sd_limit)58*4882a593Smuzhiyun static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,
59*4882a593Smuzhiyun u32 type,
60*4882a593Smuzhiyun u32 idx,
61*4882a593Smuzhiyun u32 cnt,
62*4882a593Smuzhiyun u32 *sd_idx,
63*4882a593Smuzhiyun u32 *sd_limit)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun u64 fpm_addr, fpm_limit;
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun fpm_addr = hmc_info->hmc_obj[(type)].base +
68*4882a593Smuzhiyun hmc_info->hmc_obj[type].size * idx;
69*4882a593Smuzhiyun fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
70*4882a593Smuzhiyun *sd_idx = (u32)(fpm_addr / I40IW_HMC_DIRECT_BP_SIZE);
71*4882a593Smuzhiyun *sd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_DIRECT_BP_SIZE);
72*4882a593Smuzhiyun *sd_limit += 1;
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun /**
76*4882a593Smuzhiyun * i40iw_find_pd_index_limit - finds page descriptor index limit
77*4882a593Smuzhiyun * @hmc_info: pointer to the HMC configuration information struct
78*4882a593Smuzhiyun * @type: HMC resource type we're examining
79*4882a593Smuzhiyun * @idx: starting index for the object
80*4882a593Smuzhiyun * @cnt: number of objects we're trying to create
81*4882a593Smuzhiyun * @pd_index: pointer to return page descriptor index
82*4882a593Smuzhiyun * @pd_limit: pointer to return page descriptor index limit
83*4882a593Smuzhiyun *
84*4882a593Smuzhiyun * Calculates the page descriptor index and index limit for the resource
85*4882a593Smuzhiyun * defined by i40iw_hmc_rsrc_type.
86*4882a593Smuzhiyun */
87*4882a593Smuzhiyun
i40iw_find_pd_index_limit(struct i40iw_hmc_info * hmc_info,u32 type,u32 idx,u32 cnt,u32 * pd_idx,u32 * pd_limit)88*4882a593Smuzhiyun static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info,
89*4882a593Smuzhiyun u32 type,
90*4882a593Smuzhiyun u32 idx,
91*4882a593Smuzhiyun u32 cnt,
92*4882a593Smuzhiyun u32 *pd_idx,
93*4882a593Smuzhiyun u32 *pd_limit)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun u64 fpm_adr, fpm_limit;
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun fpm_adr = hmc_info->hmc_obj[type].base +
98*4882a593Smuzhiyun hmc_info->hmc_obj[type].size * idx;
99*4882a593Smuzhiyun fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt);
100*4882a593Smuzhiyun *(pd_idx) = (u32)(fpm_adr / I40IW_HMC_PAGED_BP_SIZE);
101*4882a593Smuzhiyun *(pd_limit) = (u32)((fpm_limit - 1) / I40IW_HMC_PAGED_BP_SIZE);
102*4882a593Smuzhiyun *(pd_limit) += 1;
103*4882a593Smuzhiyun }
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun /**
106*4882a593Smuzhiyun * i40iw_set_sd_entry - setup entry for sd programming
107*4882a593Smuzhiyun * @pa: physical addr
108*4882a593Smuzhiyun * @idx: sd index
109*4882a593Smuzhiyun * @type: paged or direct sd
110*4882a593Smuzhiyun * @entry: sd entry ptr
111*4882a593Smuzhiyun */
i40iw_set_sd_entry(u64 pa,u32 idx,enum i40iw_sd_entry_type type,struct update_sd_entry * entry)112*4882a593Smuzhiyun static inline void i40iw_set_sd_entry(u64 pa,
113*4882a593Smuzhiyun u32 idx,
114*4882a593Smuzhiyun enum i40iw_sd_entry_type type,
115*4882a593Smuzhiyun struct update_sd_entry *entry)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun entry->data = pa | (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
118*4882a593Smuzhiyun (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
119*4882a593Smuzhiyun I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |
120*4882a593Smuzhiyun (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);
121*4882a593Smuzhiyun entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun /**
125*4882a593Smuzhiyun * i40iw_clr_sd_entry - setup entry for sd clear
126*4882a593Smuzhiyun * @idx: sd index
127*4882a593Smuzhiyun * @type: paged or direct sd
128*4882a593Smuzhiyun * @entry: sd entry ptr
129*4882a593Smuzhiyun */
i40iw_clr_sd_entry(u32 idx,enum i40iw_sd_entry_type type,struct update_sd_entry * entry)130*4882a593Smuzhiyun static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type,
131*4882a593Smuzhiyun struct update_sd_entry *entry)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun entry->data = (I40IW_HMC_MAX_BP_COUNT <<
134*4882a593Smuzhiyun I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
135*4882a593Smuzhiyun (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
136*4882a593Smuzhiyun I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);
137*4882a593Smuzhiyun entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
138*4882a593Smuzhiyun }
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun /**
141*4882a593Smuzhiyun * i40iw_hmc_sd_one - setup 1 sd entry for cqp
142*4882a593Smuzhiyun * @dev: pointer to the device structure
143*4882a593Smuzhiyun * @hmc_fn_id: hmc's function id
144*4882a593Smuzhiyun * @pa: physical addr
145*4882a593Smuzhiyun * @sd_idx: sd index
146*4882a593Smuzhiyun * @type: paged or direct sd
147*4882a593Smuzhiyun * @setsd: flag to set or clear sd
148*4882a593Smuzhiyun */
i40iw_hmc_sd_one(struct i40iw_sc_dev * dev,u8 hmc_fn_id,u64 pa,u32 sd_idx,enum i40iw_sd_entry_type type,bool setsd)149*4882a593Smuzhiyun enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev,
150*4882a593Smuzhiyun u8 hmc_fn_id,
151*4882a593Smuzhiyun u64 pa, u32 sd_idx,
152*4882a593Smuzhiyun enum i40iw_sd_entry_type type,
153*4882a593Smuzhiyun bool setsd)
154*4882a593Smuzhiyun {
155*4882a593Smuzhiyun struct i40iw_update_sds_info sdinfo;
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun sdinfo.cnt = 1;
158*4882a593Smuzhiyun sdinfo.hmc_fn_id = hmc_fn_id;
159*4882a593Smuzhiyun if (setsd)
160*4882a593Smuzhiyun i40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
161*4882a593Smuzhiyun else
162*4882a593Smuzhiyun i40iw_clr_sd_entry(sd_idx, type, sdinfo.entry);
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun return dev->cqp->process_cqp_sds(dev, &sdinfo);
165*4882a593Smuzhiyun }
166*4882a593Smuzhiyun
167*4882a593Smuzhiyun /**
168*4882a593Smuzhiyun * i40iw_hmc_sd_grp - setup group od sd entries for cqp
169*4882a593Smuzhiyun * @dev: pointer to the device structure
170*4882a593Smuzhiyun * @hmc_info: pointer to the HMC configuration information struct
171*4882a593Smuzhiyun * @sd_index: sd index
172*4882a593Smuzhiyun * @sd_cnt: number of sd entries
173*4882a593Smuzhiyun * @setsd: flag to set or clear sd
174*4882a593Smuzhiyun */
i40iw_hmc_sd_grp(struct i40iw_sc_dev * dev,struct i40iw_hmc_info * hmc_info,u32 sd_index,u32 sd_cnt,bool setsd)175*4882a593Smuzhiyun static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev,
176*4882a593Smuzhiyun struct i40iw_hmc_info *hmc_info,
177*4882a593Smuzhiyun u32 sd_index,
178*4882a593Smuzhiyun u32 sd_cnt,
179*4882a593Smuzhiyun bool setsd)
180*4882a593Smuzhiyun {
181*4882a593Smuzhiyun struct i40iw_hmc_sd_entry *sd_entry;
182*4882a593Smuzhiyun struct i40iw_update_sds_info sdinfo;
183*4882a593Smuzhiyun u64 pa;
184*4882a593Smuzhiyun u32 i;
185*4882a593Smuzhiyun enum i40iw_status_code ret_code = 0;
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun memset(&sdinfo, 0, sizeof(sdinfo));
188*4882a593Smuzhiyun sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
189*4882a593Smuzhiyun for (i = sd_index; i < sd_index + sd_cnt; i++) {
190*4882a593Smuzhiyun sd_entry = &hmc_info->sd_table.sd_entry[i];
191*4882a593Smuzhiyun if (!sd_entry ||
192*4882a593Smuzhiyun (!sd_entry->valid && setsd) ||
193*4882a593Smuzhiyun (sd_entry->valid && !setsd))
194*4882a593Smuzhiyun continue;
195*4882a593Smuzhiyun if (setsd) {
196*4882a593Smuzhiyun pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
197*4882a593Smuzhiyun sd_entry->u.pd_table.pd_page_addr.pa :
198*4882a593Smuzhiyun sd_entry->u.bp.addr.pa;
199*4882a593Smuzhiyun i40iw_set_sd_entry(pa, i, sd_entry->entry_type,
200*4882a593Smuzhiyun &sdinfo.entry[sdinfo.cnt]);
201*4882a593Smuzhiyun } else {
202*4882a593Smuzhiyun i40iw_clr_sd_entry(i, sd_entry->entry_type,
203*4882a593Smuzhiyun &sdinfo.entry[sdinfo.cnt]);
204*4882a593Smuzhiyun }
205*4882a593Smuzhiyun sdinfo.cnt++;
206*4882a593Smuzhiyun if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) {
207*4882a593Smuzhiyun ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
208*4882a593Smuzhiyun if (ret_code) {
209*4882a593Smuzhiyun i40iw_debug(dev, I40IW_DEBUG_HMC,
210*4882a593Smuzhiyun "i40iw_hmc_sd_grp: sd_programming failed err=%d\n",
211*4882a593Smuzhiyun ret_code);
212*4882a593Smuzhiyun return ret_code;
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun sdinfo.cnt = 0;
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun }
217*4882a593Smuzhiyun if (sdinfo.cnt)
218*4882a593Smuzhiyun ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
219*4882a593Smuzhiyun
220*4882a593Smuzhiyun return ret_code;
221*4882a593Smuzhiyun }
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun /**
224*4882a593Smuzhiyun * i40iw_vfdev_from_fpm - return vf dev ptr for hmc function id
225*4882a593Smuzhiyun * @dev: pointer to the device structure
226*4882a593Smuzhiyun * @hmc_fn_id: hmc's function id
227*4882a593Smuzhiyun */
i40iw_vfdev_from_fpm(struct i40iw_sc_dev * dev,u8 hmc_fn_id)228*4882a593Smuzhiyun struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun struct i40iw_vfdev *vf_dev = NULL;
231*4882a593Smuzhiyun u16 idx;
232*4882a593Smuzhiyun
233*4882a593Smuzhiyun for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
234*4882a593Smuzhiyun if (dev->vf_dev[idx] &&
235*4882a593Smuzhiyun ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
236*4882a593Smuzhiyun vf_dev = dev->vf_dev[idx];
237*4882a593Smuzhiyun break;
238*4882a593Smuzhiyun }
239*4882a593Smuzhiyun }
240*4882a593Smuzhiyun return vf_dev;
241*4882a593Smuzhiyun }
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun /**
244*4882a593Smuzhiyun * i40iw_vf_hmcinfo_from_fpm - get ptr to hmc for func_id
245*4882a593Smuzhiyun * @dev: pointer to the device structure
246*4882a593Smuzhiyun * @hmc_fn_id: hmc's function id
247*4882a593Smuzhiyun */
i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev * dev,u8 hmc_fn_id)248*4882a593Smuzhiyun struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
249*4882a593Smuzhiyun u8 hmc_fn_id)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun struct i40iw_hmc_info *hmc_info = NULL;
252*4882a593Smuzhiyun u16 idx;
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
255*4882a593Smuzhiyun if (dev->vf_dev[idx] &&
256*4882a593Smuzhiyun ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
257*4882a593Smuzhiyun hmc_info = &dev->vf_dev[idx]->hmc_info;
258*4882a593Smuzhiyun break;
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun }
261*4882a593Smuzhiyun return hmc_info;
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun
264*4882a593Smuzhiyun /**
265*4882a593Smuzhiyun * i40iw_hmc_finish_add_sd_reg - program sd entries for objects
266*4882a593Smuzhiyun * @dev: pointer to the device structure
267*4882a593Smuzhiyun * @info: create obj info
268*4882a593Smuzhiyun */
i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev * dev,struct i40iw_hmc_create_obj_info * info)269*4882a593Smuzhiyun static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *dev,
270*4882a593Smuzhiyun struct i40iw_hmc_create_obj_info *info)
271*4882a593Smuzhiyun {
272*4882a593Smuzhiyun if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
273*4882a593Smuzhiyun return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
274*4882a593Smuzhiyun
275*4882a593Smuzhiyun if ((info->start_idx + info->count) >
276*4882a593Smuzhiyun info->hmc_info->hmc_obj[info->rsrc_type].cnt)
277*4882a593Smuzhiyun return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun if (!info->add_sd_cnt)
280*4882a593Smuzhiyun return 0;
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun return i40iw_hmc_sd_grp(dev, info->hmc_info,
283*4882a593Smuzhiyun info->hmc_info->sd_indexes[0],
284*4882a593Smuzhiyun info->add_sd_cnt, true);
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun
/**
 * i40iw_sc_create_hmc_obj - allocate backing store for hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to i40iw_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.  For a VF the request is forwarded over the
 * virtual channel instead.  On failure, any sd entries added by this
 * call are unwound at exit_sd_error.
 */
enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
					       struct i40iw_hmc_create_obj_info *info)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	enum i40iw_status_code ret_code = 0;

	/* the requested object range must lie within the resource count */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error type %u, start = %u, req cnt %u, cnt = %u\n",
			    __func__, info->rsrc_type, info->start_idx, info->count,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
	}

	/* VFs delegate HMC object creation to the PF via the virtual channel */
	if (!dev->is_pf)
		return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count);

	/* translate the object range into sd and pd index ranges */
	i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count,
				  &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return I40IW_ERR_INVALID_SD_INDEX;
	}
	i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx, &pd_lmt);

	for (j = sd_idx; j < sd_lmt; j++) {
		ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info,
						    j,
						    info->entry_type,
						    I40IW_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];

		/*
		 * Paged sd entries in the PF's own HMC need their pd table
		 * populated here; PBLE pds are managed elsewhere.
		 */
		if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) &&
		    ((dev->hmc_info == info->hmc_info) &&
		     (info->rsrc_type != I40IW_HMC_IW_PBLE))) {
			/* clamp the pd range to the portion covered by sd j */
			pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      (j + 1) * I40IW_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* roll back the pds added for this sd; i is the failed index */
				while (i && (i > pd_idx1)) {
					i40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1),
							   info->is_pf);
					i--;
				}
			}
		}
		/* NOTE(review): a pd_error here is not propagated if the
		 * sd entry was already valid - presumably intentional, but
		 * worth confirming against the hardware programming model.
		 */
		if (sd_entry->valid)
			continue;

		/* record a freshly added sd so finish_add_sd_reg programs it */
		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return i40iw_hmc_finish_add_sd_reg(dev, info);

exit_sd_error:
	/* unwind sd entries [sd_idx, j); j is the index that failed */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40IW_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      (j - 1) * I40IW_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40iw_prep_remove_pd_page(info->hmc_info, i);
			break;
		case I40IW_SD_TYPE_DIRECT:
			i40iw_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40IW_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}

	return ret_code;
}
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun /**
397*4882a593Smuzhiyun * i40iw_finish_del_sd_reg - delete sd entries for objects
398*4882a593Smuzhiyun * @dev: pointer to the device structure
399*4882a593Smuzhiyun * @info: dele obj info
400*4882a593Smuzhiyun * @reset: true if called before reset
401*4882a593Smuzhiyun */
i40iw_finish_del_sd_reg(struct i40iw_sc_dev * dev,struct i40iw_hmc_del_obj_info * info,bool reset)402*4882a593Smuzhiyun static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
403*4882a593Smuzhiyun struct i40iw_hmc_del_obj_info *info,
404*4882a593Smuzhiyun bool reset)
405*4882a593Smuzhiyun {
406*4882a593Smuzhiyun struct i40iw_hmc_sd_entry *sd_entry;
407*4882a593Smuzhiyun enum i40iw_status_code ret_code = 0;
408*4882a593Smuzhiyun u32 i, sd_idx;
409*4882a593Smuzhiyun struct i40iw_dma_mem *mem;
410*4882a593Smuzhiyun
411*4882a593Smuzhiyun if (dev->is_pf && !reset)
412*4882a593Smuzhiyun ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,
413*4882a593Smuzhiyun info->hmc_info->sd_indexes[0],
414*4882a593Smuzhiyun info->del_sd_cnt, false);
415*4882a593Smuzhiyun
416*4882a593Smuzhiyun if (ret_code)
417*4882a593Smuzhiyun i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__);
418*4882a593Smuzhiyun
419*4882a593Smuzhiyun for (i = 0; i < info->del_sd_cnt; i++) {
420*4882a593Smuzhiyun sd_idx = info->hmc_info->sd_indexes[i];
421*4882a593Smuzhiyun sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
422*4882a593Smuzhiyun if (!sd_entry)
423*4882a593Smuzhiyun continue;
424*4882a593Smuzhiyun mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
425*4882a593Smuzhiyun &sd_entry->u.pd_table.pd_page_addr :
426*4882a593Smuzhiyun &sd_entry->u.bp.addr;
427*4882a593Smuzhiyun
428*4882a593Smuzhiyun if (!mem || !mem->va)
429*4882a593Smuzhiyun i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__);
430*4882a593Smuzhiyun else
431*4882a593Smuzhiyun i40iw_free_dma_mem(dev->hw, mem);
432*4882a593Smuzhiyun }
433*4882a593Smuzhiyun return ret_code;
434*4882a593Smuzhiyun }
435*4882a593Smuzhiyun
/**
 * i40iw_sc_del_hmc_obj - remove pe hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to i40iw_hmc_del_obj_info struct
 * @reset: true if called before reset
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDS and backing storage.  After this function is returned,
 * caller should deallocate memory allocated previously for
 * book-keeping information about PDs and backing storage.
 */
enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_del_obj_info *info,
					    bool reset)
{
	struct i40iw_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	enum i40iw_status_code ret_code = 0;

	/* the requested object range must lie within the resource count */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
			    __func__, info->start_idx, info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
			    __func__, info->start_idx, info->count,
			    info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
	}
	/*
	 * VFs forward the delete to the PF; only PBLE objects fall
	 * through to the local teardown below.
	 */
	if (!dev->is_pf) {
		ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0,
						      info->count);
		if (info->rsrc_type != I40IW_HMC_IW_PBLE)
			return ret_code;
	}

	i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx, &pd_lmt);

	/* pass 1: remove the backing pages behind every paged pd in range */
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40IW_HMC_PD_CNT_IN_SD;

		/* direct sds carry no pd table, so there is nothing to remove */
		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    I40IW_SD_TYPE_PAGED)
			continue;

		rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j,
						      info->is_pf);
			if (ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__);
				return ret_code;
			}
		}
	}

	i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__);
		return I40IW_ERR_INVALID_SD_INDEX;
	}

	/*
	 * Pass 2: prepare each valid sd for removal, recording the index
	 * in sd_indexes so i40iw_finish_del_sd_reg can clear it via CQP
	 * and free its backing memory.
	 */
	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40IW_SD_TYPE_DIRECT:
			ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
				info->del_sd_cnt++;
			}
			break;
		case I40IW_SD_TYPE_PAGED:
			ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
				info->del_sd_cnt++;
			}
			break;
		default:
			break;
		}
	}
	return i40iw_finish_del_sd_reg(dev, info, reset);
}
535*4882a593Smuzhiyun
/**
 * i40iw_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 *
 * Allocates the DMA backing memory for the sd entry (and, for paged
 * sds, the virtual pd-entry array) the first time the entry is used.
 * Direct entries have their backing-page refcount bumped on every call.
 */
enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
						struct i40iw_hmc_info *hmc_info,
						u32 sd_index,
						enum i40iw_sd_entry_type type,
						u64 direct_mode_sz)
{
	enum i40iw_status_code ret_code = 0;
	struct i40iw_hmc_sd_entry *sd_entry;
	bool dma_mem_alloc_done = false;
	struct i40iw_dma_mem mem;
	u64 alloc_len;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		/* paged sds use a fixed pd-page size; direct sds use the caller's size */
		if (type == I40IW_SD_TYPE_PAGED)
			alloc_len = I40IW_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,
						  I40IW_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (type == I40IW_SD_TYPE_PAGED) {
			/* a paged sd also needs host memory for its 512 pd entries */
			ret_code = i40iw_allocate_virt_mem(hw,
							   &sd_entry->u.pd_table.pd_entry_virt_mem,
							   sizeof(struct i40iw_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)
							sd_entry->u.pd_table.pd_entry_virt_mem.va;

			/* the sd entry takes ownership of the DMA memory in 'mem' */
			memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));
		} else {
			memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));
			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		I40IW_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)
		I40IW_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	/* on failure, release the DMA memory if it was allocated above */
	if (ret_code)
		if (dma_mem_alloc_done)
			i40iw_free_dma_mem(hw, &mem);

	return ret_code;
}
597*4882a593Smuzhiyun
598*4882a593Smuzhiyun /**
599*4882a593Smuzhiyun * i40iw_add_pd_table_entry - Adds page descriptor to the specified table
600*4882a593Smuzhiyun * @hw: pointer to our HW structure
601*4882a593Smuzhiyun * @hmc_info: pointer to the HMC configuration information structure
602*4882a593Smuzhiyun * @pd_index: which page descriptor index to manipulate
603*4882a593Smuzhiyun * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
604*4882a593Smuzhiyun *
605*4882a593Smuzhiyun * This function:
606*4882a593Smuzhiyun * 1. Initializes the pd entry
607*4882a593Smuzhiyun * 2. Adds pd_entry in the pd_table
608*4882a593Smuzhiyun * 3. Mark the entry valid in i40iw_hmc_pd_entry structure
609*4882a593Smuzhiyun * 4. Initializes the pd_entry's ref count to 1
610*4882a593Smuzhiyun * assumptions:
611*4882a593Smuzhiyun * 1. The memory for pd should be pinned down, physically contiguous and
612*4882a593Smuzhiyun * aligned on 4K boundary and zeroed memory.
613*4882a593Smuzhiyun * 2. It should be 4K in size.
614*4882a593Smuzhiyun */
enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
						struct i40iw_hmc_info *hmc_info,
						u32 pd_index,
						struct i40iw_dma_mem *rsrc_pg)
{
	enum i40iw_status_code ret_code = 0;
	struct i40iw_hmc_pd_table *pd_table;
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_dma_mem mem;
	struct i40iw_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	/* Reject indices that fall beyond the configured SD table. */
	if (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;

	sd_idx = (pd_index / I40IW_HMC_PD_CNT_IN_SD);
	/* Direct-mode SDs carry no PD table; nothing to add in that case. */
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED)
		return 0;

	rel_pd_idx = (pd_index % I40IW_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			/* Caller supplied a preallocated backing page. */
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			/* Allocate a fresh 4K-aligned DMA backing page. */
			ret_code = i40iw_allocate_dma_mem(hw, page,
						I40IW_HMC_PAGED_BP_SIZE,
						I40IW_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				return ret_code;
			pd_entry->rsrc_pg = false;
		}

		memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED;
		/* Low bit of the descriptor marks the page present for HW. */
		page_desc = page->pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		/* Write the descriptor into the PD page before marking valid. */
		memcpy(pd_addr, &page_desc, sizeof(*pd_addr));

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40IW_INC_PD_REFCNT(pd_table);
		/* Invalidate the stale PD in the HW cache; PF and VF use
		 * different register paths. The VF path is skipped when the
		 * hmc_info belongs to this function itself.
		 */
		if (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID)
			I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx);
		else if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id)
			I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx,
						   hmc_info->hmc_fn_id);
	}
	/* Each caller holds a reference on the backing page. */
	I40IW_INC_BP_REFCNT(&pd_entry->bp);

	return 0;
}
675*4882a593Smuzhiyun
676*4882a593Smuzhiyun /**
677*4882a593Smuzhiyun * i40iw_remove_pd_bp - remove a backing page from a page descriptor
678*4882a593Smuzhiyun * @hw: pointer to our HW structure
679*4882a593Smuzhiyun * @hmc_info: pointer to the HMC configuration information structure
680*4882a593Smuzhiyun * @idx: the page index
681*4882a593Smuzhiyun * @is_pf: distinguishes a VF from a PF
682*4882a593Smuzhiyun *
683*4882a593Smuzhiyun * This function:
684*4882a593Smuzhiyun * 1. Marks the entry in pd table (for paged address mode) or in sd table
685*4882a593Smuzhiyun * (for direct address mode) invalid.
686*4882a593Smuzhiyun * 2. Write to register PMPDINV to invalidate the backing page in FV cache
687*4882a593Smuzhiyun * 3. Decrement the ref count for the pd _entry
688*4882a593Smuzhiyun * assumptions:
689*4882a593Smuzhiyun * 1. Caller can deallocate the memory used by backing storage after this
690*4882a593Smuzhiyun * function returns.
691*4882a593Smuzhiyun */
enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
					  struct i40iw_hmc_info *hmc_info,
					  u32 idx,
					  bool is_pf)
{
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_hmc_pd_table *pd_table;
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct i40iw_dma_mem *mem;
	u64 *pd_addr;

	sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;

	/* Only paged SDs own a PD table to remove a backing page from. */
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
		return I40IW_ERR_INVALID_SD_TYPE;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	/* Drop one reference; the page stays mapped while others remain. */
	I40IW_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		return 0;

	pd_entry->valid = false;
	I40IW_DEC_PD_REFCNT(pd_table);
	/* Clear the descriptor in the PD page so HW no longer sees it. */
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	if (is_pf)
		I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
	else
		I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
					   hmc_info->hmc_fn_id);

	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		/* mem points at an embedded struct member and can never be
		 * NULL; only the virtual mapping needs validating. (The old
		 * "!mem ||" test was dead code.)
		 */
		if (!mem->va)
			return I40IW_ERR_PARAM;
		i40iw_free_dma_mem(hw, mem);
	}
	/* Free the PD entry array once the whole table is unreferenced. */
	if (!pd_table->ref_cnt)
		i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);

	return 0;
}
741*4882a593Smuzhiyun
742*4882a593Smuzhiyun /**
743*4882a593Smuzhiyun * i40iw_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
744*4882a593Smuzhiyun * @hmc_info: pointer to the HMC configuration information structure
745*4882a593Smuzhiyun * @idx: the page index
746*4882a593Smuzhiyun */
enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx)
{
	struct i40iw_hmc_sd_entry *entry = &hmc_info->sd_table.sd_entry[idx];

	/* Release one reference on the backing page; other holders keep it alive. */
	I40IW_DEC_BP_REFCNT(&entry->u.bp);
	if (entry->u.bp.ref_cnt)
		return I40IW_ERR_NOT_READY;

	/* Last user gone: invalidate the SD entry and drop the table reference. */
	entry->valid = false;
	I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);

	return 0;
}
761*4882a593Smuzhiyun
762*4882a593Smuzhiyun /**
763*4882a593Smuzhiyun * i40iw_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
764*4882a593Smuzhiyun * @hmc_info: pointer to the HMC configuration information structure
765*4882a593Smuzhiyun * @idx: segment descriptor index to find the relevant page descriptor
766*4882a593Smuzhiyun */
enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info,
						 u32 idx)
{
	struct i40iw_hmc_sd_entry *entry = &hmc_info->sd_table.sd_entry[idx];

	/* The PD table must be fully drained before its page can go away. */
	if (entry->u.pd_table.ref_cnt)
		return I40IW_ERR_NOT_READY;

	I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
	entry->valid = false;

	return 0;
}
782*4882a593Smuzhiyun
783*4882a593Smuzhiyun /**
784*4882a593Smuzhiyun * i40iw_pf_init_vfhmc -
785*4882a593Smuzhiyun * @vf_cnt_array: array of cnt values of iwarp hmc objects
786*4882a593Smuzhiyun * @vf_hmc_fn_id: hmc function id ofr vf driver
787*4882a593Smuzhiyun * @dev: pointer to i40iw_dev struct
788*4882a593Smuzhiyun *
789*4882a593Smuzhiyun * Called by pf driver to initialize hmc_info for vf driver instance.
790*4882a593Smuzhiyun */
enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev,
					   u8 vf_hmc_fn_id,
					   u32 *vf_cnt_array)
{
	struct i40iw_hmc_info *hmc_info;
	enum i40iw_status_code ret_code;
	u32 obj;

	/* VF FPM function ids live in a fixed window above the PF ids. */
	if ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) ||
	    (vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID +
			     I40IW_MAX_PE_ENABLED_VF_COUNT)) {
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id 0x%x\n",
			    __func__, vf_hmc_fn_id);
		return I40IW_ERR_INVALID_HMCFN_ID;
	}

	ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id);
	if (ret_code)
		return ret_code;

	hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id);

	/* Seed per-object counts from the caller's array, or fall back to
	 * each object's maximum when no array was supplied.
	 */
	for (obj = I40IW_HMC_IW_QP; obj < I40IW_HMC_IW_MAX; obj++) {
		if (vf_cnt_array)
			hmc_info->hmc_obj[obj].cnt =
				vf_cnt_array[obj - I40IW_HMC_IW_QP];
		else
			hmc_info->hmc_obj[obj].cnt =
				hmc_info->hmc_obj[obj].max_cnt;
	}

	return 0;
}
822