1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * SPDX-License-Identifier: GPL-2.0
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #define pr_fmt(fmt) "tegra-xusb-padctl: " fmt
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <common.h>
10*4882a593Smuzhiyun #include <errno.h>
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include "xusb-padctl-common.h"
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun #include <asm/arch/clock.h>
15*4882a593Smuzhiyun
tegra_xusb_phy_prepare(struct tegra_xusb_phy * phy)16*4882a593Smuzhiyun int tegra_xusb_phy_prepare(struct tegra_xusb_phy *phy)
17*4882a593Smuzhiyun {
18*4882a593Smuzhiyun if (phy && phy->ops && phy->ops->prepare)
19*4882a593Smuzhiyun return phy->ops->prepare(phy);
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun return phy ? -ENOSYS : -EINVAL;
22*4882a593Smuzhiyun }
23*4882a593Smuzhiyun
tegra_xusb_phy_enable(struct tegra_xusb_phy * phy)24*4882a593Smuzhiyun int tegra_xusb_phy_enable(struct tegra_xusb_phy *phy)
25*4882a593Smuzhiyun {
26*4882a593Smuzhiyun if (phy && phy->ops && phy->ops->enable)
27*4882a593Smuzhiyun return phy->ops->enable(phy);
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun return phy ? -ENOSYS : -EINVAL;
30*4882a593Smuzhiyun }
31*4882a593Smuzhiyun
tegra_xusb_phy_disable(struct tegra_xusb_phy * phy)32*4882a593Smuzhiyun int tegra_xusb_phy_disable(struct tegra_xusb_phy *phy)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun if (phy && phy->ops && phy->ops->disable)
35*4882a593Smuzhiyun return phy->ops->disable(phy);
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun return phy ? -ENOSYS : -EINVAL;
38*4882a593Smuzhiyun }
39*4882a593Smuzhiyun
tegra_xusb_phy_unprepare(struct tegra_xusb_phy * phy)40*4882a593Smuzhiyun int tegra_xusb_phy_unprepare(struct tegra_xusb_phy *phy)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun if (phy && phy->ops && phy->ops->unprepare)
43*4882a593Smuzhiyun return phy->ops->unprepare(phy);
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun return phy ? -ENOSYS : -EINVAL;
46*4882a593Smuzhiyun }
47*4882a593Smuzhiyun
tegra_xusb_phy_get(unsigned int type)48*4882a593Smuzhiyun struct tegra_xusb_phy *tegra_xusb_phy_get(unsigned int type)
49*4882a593Smuzhiyun {
50*4882a593Smuzhiyun struct tegra_xusb_phy *phy;
51*4882a593Smuzhiyun int i;
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun for (i = 0; i < padctl.socdata->num_phys; i++) {
54*4882a593Smuzhiyun phy = &padctl.socdata->phys[i];
55*4882a593Smuzhiyun if (phy->type != type)
56*4882a593Smuzhiyun continue;
57*4882a593Smuzhiyun return phy;
58*4882a593Smuzhiyun }
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun return NULL;
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun static const struct tegra_xusb_padctl_lane *
tegra_xusb_padctl_find_lane(struct tegra_xusb_padctl * padctl,const char * name)64*4882a593Smuzhiyun tegra_xusb_padctl_find_lane(struct tegra_xusb_padctl *padctl, const char *name)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun unsigned int i;
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun for (i = 0; i < padctl->socdata->num_lanes; i++)
69*4882a593Smuzhiyun if (strcmp(name, padctl->socdata->lanes[i].name) == 0)
70*4882a593Smuzhiyun return &padctl->socdata->lanes[i];
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun return NULL;
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun static int
tegra_xusb_padctl_group_parse_dt(struct tegra_xusb_padctl * padctl,struct tegra_xusb_padctl_group * group,ofnode node)76*4882a593Smuzhiyun tegra_xusb_padctl_group_parse_dt(struct tegra_xusb_padctl *padctl,
77*4882a593Smuzhiyun struct tegra_xusb_padctl_group *group,
78*4882a593Smuzhiyun ofnode node)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun unsigned int i;
81*4882a593Smuzhiyun int len, ret;
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun group->name = ofnode_get_name(node);
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun len = ofnode_read_string_count(node, "nvidia,lanes");
86*4882a593Smuzhiyun if (len < 0) {
87*4882a593Smuzhiyun pr_err("failed to parse \"nvidia,lanes\" property");
88*4882a593Smuzhiyun return -EINVAL;
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun group->num_pins = len;
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun for (i = 0; i < group->num_pins; i++) {
94*4882a593Smuzhiyun ret = ofnode_read_string_index(node, "nvidia,lanes", i,
95*4882a593Smuzhiyun &group->pins[i]);
96*4882a593Smuzhiyun if (ret) {
97*4882a593Smuzhiyun pr_err("failed to read string from \"nvidia,lanes\" property");
98*4882a593Smuzhiyun return -EINVAL;
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun }
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun group->num_pins = len;
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun ret = ofnode_read_string_index(node, "nvidia,function", 0,
105*4882a593Smuzhiyun &group->func);
106*4882a593Smuzhiyun if (ret) {
107*4882a593Smuzhiyun pr_err("failed to parse \"nvidia,func\" property");
108*4882a593Smuzhiyun return -EINVAL;
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun group->iddq = ofnode_read_u32_default(node, "nvidia,iddq", -1);
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun return 0;
114*4882a593Smuzhiyun }
115*4882a593Smuzhiyun
tegra_xusb_padctl_find_function(struct tegra_xusb_padctl * padctl,const char * name)116*4882a593Smuzhiyun static int tegra_xusb_padctl_find_function(struct tegra_xusb_padctl *padctl,
117*4882a593Smuzhiyun const char *name)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun unsigned int i;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun for (i = 0; i < padctl->socdata->num_functions; i++)
122*4882a593Smuzhiyun if (strcmp(name, padctl->socdata->functions[i]) == 0)
123*4882a593Smuzhiyun return i;
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun return -ENOENT;
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun static int
tegra_xusb_padctl_lane_find_function(struct tegra_xusb_padctl * padctl,const struct tegra_xusb_padctl_lane * lane,const char * name)129*4882a593Smuzhiyun tegra_xusb_padctl_lane_find_function(struct tegra_xusb_padctl *padctl,
130*4882a593Smuzhiyun const struct tegra_xusb_padctl_lane *lane,
131*4882a593Smuzhiyun const char *name)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun unsigned int i;
134*4882a593Smuzhiyun int func;
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun func = tegra_xusb_padctl_find_function(padctl, name);
137*4882a593Smuzhiyun if (func < 0)
138*4882a593Smuzhiyun return func;
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun for (i = 0; i < lane->num_funcs; i++)
141*4882a593Smuzhiyun if (lane->funcs[i] == func)
142*4882a593Smuzhiyun return i;
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun return -ENOENT;
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun static int
tegra_xusb_padctl_group_apply(struct tegra_xusb_padctl * padctl,const struct tegra_xusb_padctl_group * group)148*4882a593Smuzhiyun tegra_xusb_padctl_group_apply(struct tegra_xusb_padctl *padctl,
149*4882a593Smuzhiyun const struct tegra_xusb_padctl_group *group)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun unsigned int i;
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun for (i = 0; i < group->num_pins; i++) {
154*4882a593Smuzhiyun const struct tegra_xusb_padctl_lane *lane;
155*4882a593Smuzhiyun unsigned int func;
156*4882a593Smuzhiyun u32 value;
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun lane = tegra_xusb_padctl_find_lane(padctl, group->pins[i]);
159*4882a593Smuzhiyun if (!lane) {
160*4882a593Smuzhiyun pr_err("no lane for pin %s", group->pins[i]);
161*4882a593Smuzhiyun continue;
162*4882a593Smuzhiyun }
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun func = tegra_xusb_padctl_lane_find_function(padctl, lane,
165*4882a593Smuzhiyun group->func);
166*4882a593Smuzhiyun if (func < 0) {
167*4882a593Smuzhiyun pr_err("function %s invalid for lane %s: %d",
168*4882a593Smuzhiyun group->func, lane->name, func);
169*4882a593Smuzhiyun continue;
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun value = padctl_readl(padctl, lane->offset);
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun /* set pin function */
175*4882a593Smuzhiyun value &= ~(lane->mask << lane->shift);
176*4882a593Smuzhiyun value |= func << lane->shift;
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun /*
179*4882a593Smuzhiyun * Set IDDQ if supported on the lane and specified in the
180*4882a593Smuzhiyun * configuration.
181*4882a593Smuzhiyun */
182*4882a593Smuzhiyun if (lane->iddq > 0 && group->iddq >= 0) {
183*4882a593Smuzhiyun if (group->iddq != 0)
184*4882a593Smuzhiyun value &= ~(1 << lane->iddq);
185*4882a593Smuzhiyun else
186*4882a593Smuzhiyun value |= 1 << lane->iddq;
187*4882a593Smuzhiyun }
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun padctl_writel(padctl, value, lane->offset);
190*4882a593Smuzhiyun }
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun return 0;
193*4882a593Smuzhiyun }
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun static int
tegra_xusb_padctl_config_apply(struct tegra_xusb_padctl * padctl,struct tegra_xusb_padctl_config * config)196*4882a593Smuzhiyun tegra_xusb_padctl_config_apply(struct tegra_xusb_padctl *padctl,
197*4882a593Smuzhiyun struct tegra_xusb_padctl_config *config)
198*4882a593Smuzhiyun {
199*4882a593Smuzhiyun unsigned int i;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun for (i = 0; i < config->num_groups; i++) {
202*4882a593Smuzhiyun const struct tegra_xusb_padctl_group *group;
203*4882a593Smuzhiyun int err;
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun group = &config->groups[i];
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun err = tegra_xusb_padctl_group_apply(padctl, group);
208*4882a593Smuzhiyun if (err < 0) {
209*4882a593Smuzhiyun pr_err("failed to apply group %s: %d",
210*4882a593Smuzhiyun group->name, err);
211*4882a593Smuzhiyun continue;
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun return 0;
216*4882a593Smuzhiyun }
217*4882a593Smuzhiyun
218*4882a593Smuzhiyun static int
tegra_xusb_padctl_config_parse_dt(struct tegra_xusb_padctl * padctl,struct tegra_xusb_padctl_config * config,ofnode node)219*4882a593Smuzhiyun tegra_xusb_padctl_config_parse_dt(struct tegra_xusb_padctl *padctl,
220*4882a593Smuzhiyun struct tegra_xusb_padctl_config *config,
221*4882a593Smuzhiyun ofnode node)
222*4882a593Smuzhiyun {
223*4882a593Smuzhiyun ofnode subnode;
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun config->name = ofnode_get_name(node);
226*4882a593Smuzhiyun
227*4882a593Smuzhiyun ofnode_for_each_subnode(subnode, node) {
228*4882a593Smuzhiyun struct tegra_xusb_padctl_group *group;
229*4882a593Smuzhiyun int err;
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun group = &config->groups[config->num_groups];
232*4882a593Smuzhiyun
233*4882a593Smuzhiyun err = tegra_xusb_padctl_group_parse_dt(padctl, group, subnode);
234*4882a593Smuzhiyun if (err < 0) {
235*4882a593Smuzhiyun pr_err("failed to parse group %s", group->name);
236*4882a593Smuzhiyun return err;
237*4882a593Smuzhiyun }
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun config->num_groups++;
240*4882a593Smuzhiyun }
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun return 0;
243*4882a593Smuzhiyun }
244*4882a593Smuzhiyun
/*
 * Parse the padctl device-tree node: map its register resource and parse
 * each configuration subnode into padctl->config.
 *
 * A missing "reg" resource is fatal; a bad configuration entry is logged
 * and skipped. Returns 0 on success or a negative errno.
 */
static int tegra_xusb_padctl_parse_dt(struct tegra_xusb_padctl *padctl,
				      ofnode node)
{
	struct tegra_xusb_padctl_config *config = &padctl->config;
	ofnode subnode;
	int err;

	err = ofnode_read_resource(node, 0, &padctl->regs);
	if (err < 0) {
		pr_err("registers not found");
		return err;
	}

	ofnode_for_each_subnode(subnode, node) {
		debug("%s: subnode=%s\n", __func__, ofnode_get_name(subnode));

		err = tegra_xusb_padctl_config_parse_dt(padctl, config,
							subnode);
		if (err < 0)
			pr_err("failed to parse entry %s: %d",
			       config->name, err);
	}
	debug("%s: done\n", __func__);

	return 0;
}
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun struct tegra_xusb_padctl padctl;
275*4882a593Smuzhiyun
/*
 * Initialize the XUSB pad controller from the first usable DT node.
 *
 * Scans @nodes for an enabled padctl node, parses it, releases the padctl
 * block from reset and applies the parsed pinmux configuration. Only a
 * single instance is supported, so processing stops after the first node
 * that parses successfully. Always returns 0; failures on individual
 * nodes are logged and the next candidate is tried.
 */
int tegra_xusb_process_nodes(ofnode nodes[], unsigned int count,
			     const struct tegra_xusb_padctl_soc *socdata)
{
	unsigned int i;
	int err;

	/* count and i are unsigned, so print them with %u, not %d */
	debug("%s: count=%u\n", __func__, count);
	for (i = 0; i < count; i++) {
		debug("%s: i=%u, node=%p\n", __func__, i, nodes[i].np);
		if (!ofnode_is_available(nodes[i]))
			continue;

		padctl.socdata = socdata;

		err = tegra_xusb_padctl_parse_dt(&padctl, nodes[i]);
		if (err < 0) {
			pr_err("failed to parse DT: %d", err);
			continue;
		}

		/* deassert XUSB padctl reset */
		reset_set_enable(PERIPH_ID_XUSB_PADCTL, 0);

		err = tegra_xusb_padctl_config_apply(&padctl, &padctl.config);
		if (err < 0) {
			pr_err("failed to apply pinmux: %d", err);
			continue;
		}

		/* only a single instance is supported */
		break;
	}
	debug("%s: done\n", __func__);

	return 0;
}
312