xref: /OK3568_Linux_fs/kernel/drivers/fpga/dfl-fme-pr.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Driver for FPGA Management Engine (FME) Partial Reconfiguration
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2017-2018 Intel Corporation, Inc.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Authors:
8*4882a593Smuzhiyun  *   Kang Luwei <luwei.kang@intel.com>
9*4882a593Smuzhiyun  *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
10*4882a593Smuzhiyun  *   Wu Hao <hao.wu@intel.com>
11*4882a593Smuzhiyun  *   Joseph Grecco <joe.grecco@intel.com>
12*4882a593Smuzhiyun  *   Enno Luebbers <enno.luebbers@intel.com>
13*4882a593Smuzhiyun  *   Tim Whisonant <tim.whisonant@intel.com>
14*4882a593Smuzhiyun  *   Ananda Ravuri <ananda.ravuri@intel.com>
15*4882a593Smuzhiyun  *   Christopher Rauer <christopher.rauer@intel.com>
16*4882a593Smuzhiyun  *   Henry Mitchel <henry.mitchel@intel.com>
17*4882a593Smuzhiyun  */
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include <linux/types.h>
20*4882a593Smuzhiyun #include <linux/device.h>
21*4882a593Smuzhiyun #include <linux/vmalloc.h>
22*4882a593Smuzhiyun #include <linux/uaccess.h>
23*4882a593Smuzhiyun #include <linux/fpga/fpga-mgr.h>
24*4882a593Smuzhiyun #include <linux/fpga/fpga-bridge.h>
25*4882a593Smuzhiyun #include <linux/fpga/fpga-region.h>
26*4882a593Smuzhiyun #include <linux/fpga-dfl.h>
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #include "dfl.h"
29*4882a593Smuzhiyun #include "dfl-fme.h"
30*4882a593Smuzhiyun #include "dfl-fme-pr.h"
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun static struct dfl_fme_region *
dfl_fme_region_find_by_port_id(struct dfl_fme * fme,int port_id)33*4882a593Smuzhiyun dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
34*4882a593Smuzhiyun {
35*4882a593Smuzhiyun 	struct dfl_fme_region *fme_region;
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	list_for_each_entry(fme_region, &fme->region_list, node)
38*4882a593Smuzhiyun 		if (fme_region->port_id == port_id)
39*4882a593Smuzhiyun 			return fme_region;
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun 	return NULL;
42*4882a593Smuzhiyun }
43*4882a593Smuzhiyun 
dfl_fme_region_match(struct device * dev,const void * data)44*4882a593Smuzhiyun static int dfl_fme_region_match(struct device *dev, const void *data)
45*4882a593Smuzhiyun {
46*4882a593Smuzhiyun 	return dev->parent == data;
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun 
dfl_fme_region_find(struct dfl_fme * fme,int port_id)49*4882a593Smuzhiyun static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun 	struct dfl_fme_region *fme_region;
52*4882a593Smuzhiyun 	struct fpga_region *region;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
55*4882a593Smuzhiyun 	if (!fme_region)
56*4882a593Smuzhiyun 		return NULL;
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 	region = fpga_region_class_find(NULL, &fme_region->region->dev,
59*4882a593Smuzhiyun 					dfl_fme_region_match);
60*4882a593Smuzhiyun 	if (!region)
61*4882a593Smuzhiyun 		return NULL;
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	return region;
64*4882a593Smuzhiyun }
65*4882a593Smuzhiyun 
fme_pr(struct platform_device * pdev,unsigned long arg)66*4882a593Smuzhiyun static int fme_pr(struct platform_device *pdev, unsigned long arg)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
69*4882a593Smuzhiyun 	void __user *argp = (void __user *)arg;
70*4882a593Smuzhiyun 	struct dfl_fpga_fme_port_pr port_pr;
71*4882a593Smuzhiyun 	struct fpga_image_info *info;
72*4882a593Smuzhiyun 	struct fpga_region *region;
73*4882a593Smuzhiyun 	void __iomem *fme_hdr;
74*4882a593Smuzhiyun 	struct dfl_fme *fme;
75*4882a593Smuzhiyun 	unsigned long minsz;
76*4882a593Smuzhiyun 	void *buf = NULL;
77*4882a593Smuzhiyun 	size_t length;
78*4882a593Smuzhiyun 	int ret = 0;
79*4882a593Smuzhiyun 	u64 v;
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	if (copy_from_user(&port_pr, argp, minsz))
84*4882a593Smuzhiyun 		return -EFAULT;
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun 	if (port_pr.argsz < minsz || port_pr.flags)
87*4882a593Smuzhiyun 		return -EINVAL;
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun 	/* get fme header region */
90*4882a593Smuzhiyun 	fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
91*4882a593Smuzhiyun 					       FME_FEATURE_ID_HEADER);
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	/* check port id */
94*4882a593Smuzhiyun 	v = readq(fme_hdr + FME_HDR_CAP);
95*4882a593Smuzhiyun 	if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
96*4882a593Smuzhiyun 		dev_dbg(&pdev->dev, "port number more than maximum\n");
97*4882a593Smuzhiyun 		return -EINVAL;
98*4882a593Smuzhiyun 	}
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	/*
101*4882a593Smuzhiyun 	 * align PR buffer per PR bandwidth, as HW ignores the extra padding
102*4882a593Smuzhiyun 	 * data automatically.
103*4882a593Smuzhiyun 	 */
104*4882a593Smuzhiyun 	length = ALIGN(port_pr.buffer_size, 4);
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	buf = vmalloc(length);
107*4882a593Smuzhiyun 	if (!buf)
108*4882a593Smuzhiyun 		return -ENOMEM;
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	if (copy_from_user(buf,
111*4882a593Smuzhiyun 			   (void __user *)(unsigned long)port_pr.buffer_address,
112*4882a593Smuzhiyun 			   port_pr.buffer_size)) {
113*4882a593Smuzhiyun 		ret = -EFAULT;
114*4882a593Smuzhiyun 		goto free_exit;
115*4882a593Smuzhiyun 	}
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 	/* prepare fpga_image_info for PR */
118*4882a593Smuzhiyun 	info = fpga_image_info_alloc(&pdev->dev);
119*4882a593Smuzhiyun 	if (!info) {
120*4882a593Smuzhiyun 		ret = -ENOMEM;
121*4882a593Smuzhiyun 		goto free_exit;
122*4882a593Smuzhiyun 	}
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun 	info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun 	mutex_lock(&pdata->lock);
127*4882a593Smuzhiyun 	fme = dfl_fpga_pdata_get_private(pdata);
128*4882a593Smuzhiyun 	/* fme device has been unregistered. */
129*4882a593Smuzhiyun 	if (!fme) {
130*4882a593Smuzhiyun 		ret = -EINVAL;
131*4882a593Smuzhiyun 		goto unlock_exit;
132*4882a593Smuzhiyun 	}
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun 	region = dfl_fme_region_find(fme, port_pr.port_id);
135*4882a593Smuzhiyun 	if (!region) {
136*4882a593Smuzhiyun 		ret = -EINVAL;
137*4882a593Smuzhiyun 		goto unlock_exit;
138*4882a593Smuzhiyun 	}
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	fpga_image_info_free(region->info);
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 	info->buf = buf;
143*4882a593Smuzhiyun 	info->count = length;
144*4882a593Smuzhiyun 	info->region_id = port_pr.port_id;
145*4882a593Smuzhiyun 	region->info = info;
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	ret = fpga_region_program_fpga(region);
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun 	/*
150*4882a593Smuzhiyun 	 * it allows userspace to reset the PR region's logic by disabling and
151*4882a593Smuzhiyun 	 * reenabling the bridge to clear things out between accleration runs.
152*4882a593Smuzhiyun 	 * so no need to hold the bridges after partial reconfiguration.
153*4882a593Smuzhiyun 	 */
154*4882a593Smuzhiyun 	if (region->get_bridges)
155*4882a593Smuzhiyun 		fpga_bridges_put(&region->bridge_list);
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun 	put_device(&region->dev);
158*4882a593Smuzhiyun unlock_exit:
159*4882a593Smuzhiyun 	mutex_unlock(&pdata->lock);
160*4882a593Smuzhiyun free_exit:
161*4882a593Smuzhiyun 	vfree(buf);
162*4882a593Smuzhiyun 	return ret;
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun 
/**
 * dfl_fme_create_mgr - create fpga mgr platform device as child device
 *
 * @pdata: fme platform_device's pdata
 * @feature: PR management sub feature; its ioaddr is handed to the mgr
 *
 * Return: mgr platform device if successful, and error code otherwise.
 */
static struct platform_device *
dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
		   struct dfl_feature *feature)
{
	struct platform_device *mgr, *fme = pdata->dev;
	struct dfl_fme_mgr_pdata mgr_pdata;
	int ret = -ENOMEM;

	/* The mgr drives PR through this feature's MMIO region. */
	if (!feature->ioaddr)
		return ERR_PTR(-ENODEV);

	mgr_pdata.ioaddr = feature->ioaddr;

	/*
	 * Each FME has only one fpga-mgr, so allocate platform device using
	 * the same FME platform device id.
	 */
	mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
	if (!mgr)
		return ERR_PTR(ret);

	mgr->dev.parent = &fme->dev;

	ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
	if (ret)
		goto create_mgr_err;

	ret = platform_device_add(mgr);
	if (ret)
		goto create_mgr_err;

	return mgr;

create_mgr_err:
	/* Not yet added: put the refcount instead of unregistering. */
	platform_device_put(mgr);
	return ERR_PTR(ret);
}
209*4882a593Smuzhiyun 
/**
 * dfl_fme_destroy_mgr - destroy fpga mgr platform device
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);

	/* Unregister the mgr pdev stored by pr_mgmt_init(). */
	platform_device_unregister(priv->mgr);
}
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun /**
222*4882a593Smuzhiyun  * dfl_fme_create_bridge - create fme fpga bridge platform device as child
223*4882a593Smuzhiyun  *
224*4882a593Smuzhiyun  * @pdata: fme platform device's pdata
225*4882a593Smuzhiyun  * @port_id: port id for the bridge to be created.
226*4882a593Smuzhiyun  *
227*4882a593Smuzhiyun  * Return: bridge platform device if successful, and error code otherwise.
228*4882a593Smuzhiyun  */
229*4882a593Smuzhiyun static struct dfl_fme_bridge *
dfl_fme_create_bridge(struct dfl_feature_platform_data * pdata,int port_id)230*4882a593Smuzhiyun dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
231*4882a593Smuzhiyun {
232*4882a593Smuzhiyun 	struct device *dev = &pdata->dev->dev;
233*4882a593Smuzhiyun 	struct dfl_fme_br_pdata br_pdata;
234*4882a593Smuzhiyun 	struct dfl_fme_bridge *fme_br;
235*4882a593Smuzhiyun 	int ret = -ENOMEM;
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun 	fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
238*4882a593Smuzhiyun 	if (!fme_br)
239*4882a593Smuzhiyun 		return ERR_PTR(ret);
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 	br_pdata.cdev = pdata->dfl_cdev;
242*4882a593Smuzhiyun 	br_pdata.port_id = port_id;
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
245*4882a593Smuzhiyun 					   PLATFORM_DEVID_AUTO);
246*4882a593Smuzhiyun 	if (!fme_br->br)
247*4882a593Smuzhiyun 		return ERR_PTR(ret);
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 	fme_br->br->dev.parent = dev;
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun 	ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
252*4882a593Smuzhiyun 	if (ret)
253*4882a593Smuzhiyun 		goto create_br_err;
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun 	ret = platform_device_add(fme_br->br);
256*4882a593Smuzhiyun 	if (ret)
257*4882a593Smuzhiyun 		goto create_br_err;
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun 	return fme_br;
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun create_br_err:
262*4882a593Smuzhiyun 	platform_device_put(fme_br->br);
263*4882a593Smuzhiyun 	return ERR_PTR(ret);
264*4882a593Smuzhiyun }
265*4882a593Smuzhiyun 
/**
 * dfl_fme_destroy_bridge - destroy fpga bridge platform device
 * @fme_br: fme bridge to destroy
 */
static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
{
	/* Unregister the bridge pdev; fme_br itself is devm-managed. */
	platform_device_unregister(fme_br->br);
}
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun /**
276*4882a593Smuzhiyun  * dfl_fme_destroy_bridge - destroy all fpga bridge platform device
277*4882a593Smuzhiyun  * @pdata: fme platform device's pdata
278*4882a593Smuzhiyun  */
dfl_fme_destroy_bridges(struct dfl_feature_platform_data * pdata)279*4882a593Smuzhiyun static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
280*4882a593Smuzhiyun {
281*4882a593Smuzhiyun 	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
282*4882a593Smuzhiyun 	struct dfl_fme_bridge *fbridge, *tmp;
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
285*4882a593Smuzhiyun 		list_del(&fbridge->node);
286*4882a593Smuzhiyun 		dfl_fme_destroy_bridge(fbridge);
287*4882a593Smuzhiyun 	}
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun /**
291*4882a593Smuzhiyun  * dfl_fme_create_region - create fpga region platform device as child
292*4882a593Smuzhiyun  *
293*4882a593Smuzhiyun  * @pdata: fme platform device's pdata
294*4882a593Smuzhiyun  * @mgr: mgr platform device needed for region
295*4882a593Smuzhiyun  * @br: br platform device needed for region
296*4882a593Smuzhiyun  * @port_id: port id
297*4882a593Smuzhiyun  *
298*4882a593Smuzhiyun  * Return: fme region if successful, and error code otherwise.
299*4882a593Smuzhiyun  */
300*4882a593Smuzhiyun static struct dfl_fme_region *
dfl_fme_create_region(struct dfl_feature_platform_data * pdata,struct platform_device * mgr,struct platform_device * br,int port_id)301*4882a593Smuzhiyun dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
302*4882a593Smuzhiyun 		      struct platform_device *mgr,
303*4882a593Smuzhiyun 		      struct platform_device *br, int port_id)
304*4882a593Smuzhiyun {
305*4882a593Smuzhiyun 	struct dfl_fme_region_pdata region_pdata;
306*4882a593Smuzhiyun 	struct device *dev = &pdata->dev->dev;
307*4882a593Smuzhiyun 	struct dfl_fme_region *fme_region;
308*4882a593Smuzhiyun 	int ret = -ENOMEM;
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
311*4882a593Smuzhiyun 	if (!fme_region)
312*4882a593Smuzhiyun 		return ERR_PTR(ret);
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	region_pdata.mgr = mgr;
315*4882a593Smuzhiyun 	region_pdata.br = br;
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun 	/*
318*4882a593Smuzhiyun 	 * Each FPGA device may have more than one port, so allocate platform
319*4882a593Smuzhiyun 	 * device using the same port platform device id.
320*4882a593Smuzhiyun 	 */
321*4882a593Smuzhiyun 	fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
322*4882a593Smuzhiyun 	if (!fme_region->region)
323*4882a593Smuzhiyun 		return ERR_PTR(ret);
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun 	fme_region->region->dev.parent = dev;
326*4882a593Smuzhiyun 
327*4882a593Smuzhiyun 	ret = platform_device_add_data(fme_region->region, &region_pdata,
328*4882a593Smuzhiyun 				       sizeof(region_pdata));
329*4882a593Smuzhiyun 	if (ret)
330*4882a593Smuzhiyun 		goto create_region_err;
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	ret = platform_device_add(fme_region->region);
333*4882a593Smuzhiyun 	if (ret)
334*4882a593Smuzhiyun 		goto create_region_err;
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	fme_region->port_id = port_id;
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	return fme_region;
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun create_region_err:
341*4882a593Smuzhiyun 	platform_device_put(fme_region->region);
342*4882a593Smuzhiyun 	return ERR_PTR(ret);
343*4882a593Smuzhiyun }
344*4882a593Smuzhiyun 
/**
 * dfl_fme_destroy_region - destroy fme region
 * @fme_region: fme region to destroy
 */
static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
{
	/* Unregister the region pdev; fme_region itself is devm-managed. */
	platform_device_unregister(fme_region->region);
}
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun /**
355*4882a593Smuzhiyun  * dfl_fme_destroy_regions - destroy all fme regions
356*4882a593Smuzhiyun  * @pdata: fme platform device's pdata
357*4882a593Smuzhiyun  */
dfl_fme_destroy_regions(struct dfl_feature_platform_data * pdata)358*4882a593Smuzhiyun static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
359*4882a593Smuzhiyun {
360*4882a593Smuzhiyun 	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
361*4882a593Smuzhiyun 	struct dfl_fme_region *fme_region, *tmp;
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
364*4882a593Smuzhiyun 		list_del(&fme_region->node);
365*4882a593Smuzhiyun 		dfl_fme_destroy_region(fme_region);
366*4882a593Smuzhiyun 	}
367*4882a593Smuzhiyun }
368*4882a593Smuzhiyun 
pr_mgmt_init(struct platform_device * pdev,struct dfl_feature * feature)369*4882a593Smuzhiyun static int pr_mgmt_init(struct platform_device *pdev,
370*4882a593Smuzhiyun 			struct dfl_feature *feature)
371*4882a593Smuzhiyun {
372*4882a593Smuzhiyun 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
373*4882a593Smuzhiyun 	struct dfl_fme_region *fme_region;
374*4882a593Smuzhiyun 	struct dfl_fme_bridge *fme_br;
375*4882a593Smuzhiyun 	struct platform_device *mgr;
376*4882a593Smuzhiyun 	struct dfl_fme *priv;
377*4882a593Smuzhiyun 	void __iomem *fme_hdr;
378*4882a593Smuzhiyun 	int ret = -ENODEV, i = 0;
379*4882a593Smuzhiyun 	u64 fme_cap, port_offset;
380*4882a593Smuzhiyun 
381*4882a593Smuzhiyun 	fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
382*4882a593Smuzhiyun 					       FME_FEATURE_ID_HEADER);
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun 	mutex_lock(&pdata->lock);
385*4882a593Smuzhiyun 	priv = dfl_fpga_pdata_get_private(pdata);
386*4882a593Smuzhiyun 
387*4882a593Smuzhiyun 	/* Initialize the region and bridge sub device list */
388*4882a593Smuzhiyun 	INIT_LIST_HEAD(&priv->region_list);
389*4882a593Smuzhiyun 	INIT_LIST_HEAD(&priv->bridge_list);
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	/* Create fpga mgr platform device */
392*4882a593Smuzhiyun 	mgr = dfl_fme_create_mgr(pdata, feature);
393*4882a593Smuzhiyun 	if (IS_ERR(mgr)) {
394*4882a593Smuzhiyun 		dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
395*4882a593Smuzhiyun 		goto unlock;
396*4882a593Smuzhiyun 	}
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun 	priv->mgr = mgr;
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	/* Read capability register to check number of regions and bridges */
401*4882a593Smuzhiyun 	fme_cap = readq(fme_hdr + FME_HDR_CAP);
402*4882a593Smuzhiyun 	for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
403*4882a593Smuzhiyun 		port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
404*4882a593Smuzhiyun 		if (!(port_offset & FME_PORT_OFST_IMP))
405*4882a593Smuzhiyun 			continue;
406*4882a593Smuzhiyun 
407*4882a593Smuzhiyun 		/* Create bridge for each port */
408*4882a593Smuzhiyun 		fme_br = dfl_fme_create_bridge(pdata, i);
409*4882a593Smuzhiyun 		if (IS_ERR(fme_br)) {
410*4882a593Smuzhiyun 			ret = PTR_ERR(fme_br);
411*4882a593Smuzhiyun 			goto destroy_region;
412*4882a593Smuzhiyun 		}
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun 		list_add(&fme_br->node, &priv->bridge_list);
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 		/* Create region for each port */
417*4882a593Smuzhiyun 		fme_region = dfl_fme_create_region(pdata, mgr,
418*4882a593Smuzhiyun 						   fme_br->br, i);
419*4882a593Smuzhiyun 		if (IS_ERR(fme_region)) {
420*4882a593Smuzhiyun 			ret = PTR_ERR(fme_region);
421*4882a593Smuzhiyun 			goto destroy_region;
422*4882a593Smuzhiyun 		}
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun 		list_add(&fme_region->node, &priv->region_list);
425*4882a593Smuzhiyun 	}
426*4882a593Smuzhiyun 	mutex_unlock(&pdata->lock);
427*4882a593Smuzhiyun 
428*4882a593Smuzhiyun 	return 0;
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun destroy_region:
431*4882a593Smuzhiyun 	dfl_fme_destroy_regions(pdata);
432*4882a593Smuzhiyun 	dfl_fme_destroy_bridges(pdata);
433*4882a593Smuzhiyun 	dfl_fme_destroy_mgr(pdata);
434*4882a593Smuzhiyun unlock:
435*4882a593Smuzhiyun 	mutex_unlock(&pdata->lock);
436*4882a593Smuzhiyun 	return ret;
437*4882a593Smuzhiyun }
438*4882a593Smuzhiyun 
pr_mgmt_uinit(struct platform_device * pdev,struct dfl_feature * feature)439*4882a593Smuzhiyun static void pr_mgmt_uinit(struct platform_device *pdev,
440*4882a593Smuzhiyun 			  struct dfl_feature *feature)
441*4882a593Smuzhiyun {
442*4882a593Smuzhiyun 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
443*4882a593Smuzhiyun 
444*4882a593Smuzhiyun 	mutex_lock(&pdata->lock);
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 	dfl_fme_destroy_regions(pdata);
447*4882a593Smuzhiyun 	dfl_fme_destroy_bridges(pdata);
448*4882a593Smuzhiyun 	dfl_fme_destroy_mgr(pdata);
449*4882a593Smuzhiyun 	mutex_unlock(&pdata->lock);
450*4882a593Smuzhiyun }
451*4882a593Smuzhiyun 
fme_pr_ioctl(struct platform_device * pdev,struct dfl_feature * feature,unsigned int cmd,unsigned long arg)452*4882a593Smuzhiyun static long fme_pr_ioctl(struct platform_device *pdev,
453*4882a593Smuzhiyun 			 struct dfl_feature *feature,
454*4882a593Smuzhiyun 			 unsigned int cmd, unsigned long arg)
455*4882a593Smuzhiyun {
456*4882a593Smuzhiyun 	long ret;
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun 	switch (cmd) {
459*4882a593Smuzhiyun 	case DFL_FPGA_FME_PORT_PR:
460*4882a593Smuzhiyun 		ret = fme_pr(pdev, arg);
461*4882a593Smuzhiyun 		break;
462*4882a593Smuzhiyun 	default:
463*4882a593Smuzhiyun 		ret = -ENODEV;
464*4882a593Smuzhiyun 	}
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun 	return ret;
467*4882a593Smuzhiyun }
468*4882a593Smuzhiyun 
/* DFL feature id this sub-driver binds to; zero entry terminates the table. */
const struct dfl_feature_id fme_pr_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_PR_MGMT,},
	{0}
};
473*4882a593Smuzhiyun 
/* Sub-feature callbacks wired into the DFL FME driver for PR management. */
const struct dfl_feature_ops fme_pr_mgmt_ops = {
	.init = pr_mgmt_init,
	.uinit = pr_mgmt_uinit,
	.ioctl = fme_pr_ioctl,
};
479