/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
#include <linux/mdio.h>

#include "xgbe.h"
#include "xgbe-common.h"

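/* The device can be described either through ACPI or a device tree.  The
 * helpers below pull the version data and clock settings from whichever
 * firmware interface is in use; the stub variants keep the common probe
 * path free of #ifdefs when one of the interfaces is not configured.
 */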
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[];

static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
{
	const struct acpi_device_id *id;

	id = acpi_match_device(xgbe_acpi_match, pdata->dev);

	return id ? (struct xgbe_version_data *)id->driver_data : NULL;
}

static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
	struct device *dev = pdata->dev;
	u32 property;
	int ret;

	/* Obtain the system clock setting */
	ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
	if (ret) {
		dev_err(dev, "unable to obtain %s property\n",
			XGBE_ACPI_DMA_FREQ);
		return ret;
	}
	pdata->sysclk_rate = property;

	/* Obtain the PTP clock setting */
	ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
	if (ret) {
		dev_err(dev, "unable to obtain %s property\n",
			XGBE_ACPI_PTP_FREQ);
		return ret;
	}
	pdata->ptpclk_rate = property;

	return 0;
}
#else	/* CONFIG_ACPI */
static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
{
	return NULL;
}

static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
	return -EINVAL;
}
#endif	/* CONFIG_ACPI */

#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[];

static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
{
	const struct of_device_id *id;

	id = of_match_device(xgbe_of_match, pdata->dev);

	return id ? (struct xgbe_version_data *)id->data : NULL;
}

static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
	struct device *dev = pdata->dev;

	/* Obtain the system clock setting */
	pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
	if (IS_ERR(pdata->sysclk)) {
		dev_err(dev, "dma devm_clk_get failed\n");
		return PTR_ERR(pdata->sysclk);
	}
	pdata->sysclk_rate = clk_get_rate(pdata->sysclk);

	/* Obtain the PTP clock setting */
	pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
	if (IS_ERR(pdata->ptpclk)) {
		dev_err(dev, "ptp devm_clk_get failed\n");
		return PTR_ERR(pdata->ptpclk);
	}
	pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);

	return 0;
}

static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
	struct device *dev = pdata->dev;
	struct device_node *phy_node;
	struct platform_device *phy_pdev;

	phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
	if (phy_node) {
		/* Old style device tree:
		 *   The XGBE and PHY resources are separate
		 */
		phy_pdev = of_find_device_by_node(phy_node);
		of_node_put(phy_node);
	} else {
		/* New style device tree:
		 *   The XGBE and PHY resources are grouped together with
		 *   the PHY resources listed last
		 */
		get_device(dev);
		phy_pdev = pdata->platdev;
	}

	return phy_pdev;
}
#else	/* CONFIG_OF */
static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
{
	return NULL;
}

static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
	return -EINVAL;
}

static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
	return NULL;
}
#endif	/* CONFIG_OF */

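/* Count the platform resources of a given type (IORESOURCE_MEM or
 * IORESOURCE_IRQ) so the probe routine can work out how the XGBE and
 * PHY register/interrupt resources are laid out.
 */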
static unsigned int xgbe_resource_count(struct platform_device *pdev,
					unsigned int type)
{
	unsigned int count;
	int i;

	for (i = 0, count = 0; i < pdev->num_resources; i++) {
		struct resource *res = &pdev->resource[i];

		if (type == resource_type(res))
			count++;
	}

	return count;
}

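/* With ACPI the PHY resources are part of the XGBE device itself, so the
 * XGBE platform device doubles as the PHY platform device.  With a device
 * tree the PHY may instead be a separate node referenced via "phy-handle".
 */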
static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
{
	struct platform_device *phy_pdev;

	if (pdata->use_acpi) {
		get_device(pdata->dev);
		phy_pdev = pdata->platdev;
	} else {
		phy_pdev = xgbe_of_get_phy_pdev(pdata);
	}

	return phy_pdev;
}

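/* Return the version data matched for this device (ACPI or OF table). */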
static struct xgbe_version_data *xgbe_get_vdata(struct xgbe_prv_data *pdata)
{
	return pdata->use_acpi ? xgbe_acpi_vdata(pdata)
			       : xgbe_of_vdata(pdata);
}

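/* Map the register blocks, read the firmware-provided properties, clocks
 * and interrupts, and hand the populated private data off to the common
 * netdev configuration code.
 */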
static int xgbe_platform_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct device *dev = &pdev->dev;
	struct platform_device *phy_pdev;
	const char *phy_mode;
	unsigned int phy_memnum, phy_irqnum;
	unsigned int dma_irqnum, dma_irqend;
	enum dev_dma_attr attr;
	int ret;

	pdata = xgbe_alloc_pdata(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto err_alloc;
	}

	pdata->platdev = pdev;
	pdata->adev = ACPI_COMPANION(dev);
	platform_set_drvdata(pdev, pdata);

	/* Check if we should use ACPI or DT */
	pdata->use_acpi = dev->of_node ? 0 : 1;

	/* Get the version data */
	pdata->vdata = xgbe_get_vdata(pdata);

	phy_pdev = xgbe_get_phy_pdev(pdata);
	if (!phy_pdev) {
		dev_err(dev, "unable to obtain phy device\n");
		ret = -EINVAL;
		goto err_phydev;
	}
	pdata->phy_platdev = phy_pdev;
	pdata->phy_dev = &phy_pdev->dev;

	if (pdev == phy_pdev) {
		/* New style device tree or ACPI:
		 *   The XGBE and PHY resources are grouped together with
		 *   the PHY resources listed last
		 */
		phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
		phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
		dma_irqnum = 1;
		dma_irqend = phy_irqnum;
	} else {
		/* Old style device tree:
		 *   The XGBE and PHY resources are separate
		 */
		phy_memnum = 0;
		phy_irqnum = 0;
		dma_irqnum = 1;
		dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
	}

	/* Obtain the mmio areas for the device */
	pdata->xgmac_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);

	pdata->xpcs_regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);

	pdata->rxtx_regs = devm_platform_ioremap_resource(phy_pdev,
							  phy_memnum++);
	if (IS_ERR(pdata->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(pdata->rxtx_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);

	pdata->sir0_regs = devm_platform_ioremap_resource(phy_pdev,
							  phy_memnum++);
	if (IS_ERR(pdata->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(pdata->sir0_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);

	pdata->sir1_regs = devm_platform_ioremap_resource(phy_pdev,
							  phy_memnum++);
	if (IS_ERR(pdata->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(pdata->sir1_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);

	/* Retrieve the MAC address */
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
					    pdata->mac_addr,
					    sizeof(pdata->mac_addr));
	if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY mode - it must be "xgmii" */
	ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
					  &phy_mode);
	if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
		dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}
	pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;

	/* Check for per channel interrupt support */
	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY)) {
		pdata->per_channel_irq = 1;
		pdata->channel_irq_mode = XGBE_IRQ_MODE_EDGE;
	}

	/* Obtain device settings unique to ACPI/OF */
	if (pdata->use_acpi)
		ret = xgbe_acpi_support(pdata);
	else
		ret = xgbe_of_support(pdata);
	if (ret)
		goto err_io;

	/* Set the DMA coherency values */
	attr = device_get_dma_attr(dev);
	if (attr == DEV_DMA_NOT_SUPPORTED) {
		dev_err(dev, "DMA is not supported");
		ret = -ENODEV;
		goto err_io;
	}
	pdata->coherent = (attr == DEV_DMA_COHERENT);
	if (pdata->coherent) {
		pdata->arcr = XGBE_DMA_OS_ARCR;
		pdata->awcr = XGBE_DMA_OS_AWCR;
	} else {
		pdata->arcr = XGBE_DMA_SYS_ARCR;
		pdata->awcr = XGBE_DMA_SYS_AWCR;
	}

	/* Set the maximum fifo amounts */
	pdata->tx_max_fifo_size = pdata->vdata->tx_max_fifo_size;
	pdata->rx_max_fifo_size = pdata->vdata->rx_max_fifo_size;

	/* Set the hardware channel and queue counts */
	xgbe_set_counts(pdata);

	/* Always have XGMAC and XPCS (auto-negotiation) interrupts */
	pdata->irq_count = 2;

	/* Get the device interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_io;
	pdata->dev_irq = ret;

	/* Get the per channel DMA interrupts */
	if (pdata->per_channel_irq) {
		unsigned int i, max = ARRAY_SIZE(pdata->channel_irq);

		for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) {
			ret = platform_get_irq(pdata->platdev, dma_irqnum++);
			if (ret < 0)
				goto err_io;

			pdata->channel_irq[i] = ret;
		}

		pdata->channel_irq_count = max;

		pdata->irq_count += max;
	}

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum++);
	if (ret < 0)
		goto err_io;
	pdata->an_irq = ret;

	/* Configure the netdev resource */
	ret = xgbe_config_netdev(pdata);
	if (ret)
		goto err_io;

	netdev_notice(pdata->netdev, "net device enabled\n");

	return 0;

err_io:
	platform_device_put(phy_pdev);

err_phydev:
	xgbe_free_pdata(pdata);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}

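/* Reverse xgbe_platform_probe(): tear down the netdev, drop the reference
 * taken on the PHY platform device and free the private data.
 */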
static int xgbe_platform_remove(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata = platform_get_drvdata(pdev);

	xgbe_deconfig_netdev(pdata);

	platform_device_put(pdata->phy_platdev);

	xgbe_free_pdata(pdata);

	return 0;
}

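/* System sleep support: power the interface down and place the PCS in
 * low-power mode on suspend, then reverse the sequence on resume.
 */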
#ifdef CONFIG_PM_SLEEP
static int xgbe_platform_suspend(struct device *dev)
{
	struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
	struct net_device *netdev = pdata->netdev;
	int ret = 0;

	DBGPR("-->xgbe_suspend\n");

	if (netif_running(netdev))
		ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);

	pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
	pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	DBGPR("<--xgbe_suspend\n");

	return ret;
}

static int xgbe_platform_resume(struct device *dev)
{
	struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
	struct net_device *netdev = pdata->netdev;
	int ret = 0;

	DBGPR("-->xgbe_resume\n");

	pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	if (netif_running(netdev)) {
		ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);

		/* Schedule a restart in case the link or phy state changed
		 * while we were powered down.
		 */
		schedule_work(&pdata->restart_work);
	}

	DBGPR("<--xgbe_resume\n");

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

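/* Version data for the original (v1) platform hardware ("amd,xgbe-seattle-v1a"
 * and its ACPI equivalent below).
 */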
static const struct xgbe_version_data xgbe_v1 = {
	.init_function_ptrs_phy_impl	= xgbe_init_function_ptrs_phy_v1,
	.xpcs_access			= XGBE_XPCS_ACCESS_V1,
	.tx_max_fifo_size		= 81920,
	.rx_max_fifo_size		= 81920,
	.tx_tstamp_workaround		= 1,
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
	{ .id = "AMDI8001",
	  .driver_data = (kernel_ulong_t)&xgbe_v1 },
	{},
};

MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
	{ .compatible = "amd,xgbe-seattle-v1a",
	  .data = &xgbe_v1 },
	{},
};

MODULE_DEVICE_TABLE(of, xgbe_of_match);
#endif

static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
			 xgbe_platform_suspend, xgbe_platform_resume);

static struct platform_driver xgbe_driver = {
	.driver = {
		.name = XGBE_DRV_NAME,
#ifdef CONFIG_ACPI
		.acpi_match_table = xgbe_acpi_match,
#endif
#ifdef CONFIG_OF
		.of_match_table = xgbe_of_match,
#endif
		.pm = &xgbe_platform_pm_ops,
	},
	.probe = xgbe_platform_probe,
	.remove = xgbe_platform_remove,
};

int xgbe_platform_init(void)
{
	return platform_driver_register(&xgbe_driver);
}

void xgbe_platform_exit(void)
{
	platform_driver_unregister(&xgbe_driver);
}