// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) Protocol based clock driver
 *
 * Copyright (C) 2018-2020 ARM Ltd.
 */

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include <asm/div64.h>

static const struct scmi_clk_proto_ops *clk_ops;

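/*
 * struct scmi_clk - SCMI clock wrapper
 * @id: SCMI clock identifier used in protocol requests
 * @hw: handle exposed to the common clock framework
 * @info: per-clock information (name, rate range or discrete rate list)
 * @ph: SCMI protocol handle used for all clock protocol operations
 */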
struct scmi_clk {
	u32 id;
	struct clk_hw hw;
	const struct scmi_clock_info *info;
	const struct scmi_protocol_handle *ph;
};

#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)

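/*
 * Read the current rate from the SCMI firmware. Returning 0 on failure
 * tells the clock framework the rate is unknown; together with the
 * CLK_GET_RATE_NOCACHE flag set at registration, clk_get_rate() always
 * reflects what the firmware reports rather than a cached value.
 */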
static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	int ret;
	u64 rate;
	struct scmi_clk *clk = to_scmi_clk(hw);

	ret = clk_ops->rate_get(clk->ph, clk->id, &rate);
	if (ret)
		return 0;
	return rate;
}

static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *parent_rate)
{
	u64 fmin, fmax, ftmp;
	struct scmi_clk *clk = to_scmi_clk(hw);

	/*
	 * We can't figure out what rate it will be, so just return the
	 * rate back to the caller. scmi_clk_recalc_rate() will be called
	 * after the rate is set and we'll know what rate the clock is
	 * running at then.
	 */
	if (clk->info->rate_discrete)
		return rate;

	fmin = clk->info->range.min_rate;
	fmax = clk->info->range.max_rate;
	if (rate <= fmin)
		return fmin;
	else if (rate >= fmax)
		return fmax;

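	/*
	 * Round up to the next rate the range supports:
	 * fmin + DIV_ROUND_UP(rate - fmin, step) * step.
	 */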
	ftmp = rate - fmin;
	ftmp += clk->info->range.step_size - 1; /* to round up */
	do_div(ftmp, clk->info->range.step_size);

	return ftmp * clk->info->range.step_size + fmin;
}

static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return clk_ops->rate_set(clk->ph, clk->id, rate);
}

static int scmi_clk_enable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return clk_ops->enable(clk->ph, clk->id);
}

static void scmi_clk_disable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	clk_ops->disable(clk->ph, clk->id);
}

static const struct clk_ops scmi_clk_ops = {
	.recalc_rate = scmi_clk_recalc_rate,
	.round_rate = scmi_clk_round_rate,
	.set_rate = scmi_clk_set_rate,
	/*
	 * We can't provide enable/disable callbacks because SCMI
	 * transactions may sleep and so can't be done in atomic context.
	 * Since the clock framework provides clk_prepare_enable() for
	 * callers that need clk_enable() semantics from non-atomic
	 * context, providing only prepare/unprepare is fine.
	 */
	.prepare = scmi_clk_enable,
	.unprepare = scmi_clk_disable,
};

static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
{
	int ret;
	unsigned long min_rate, max_rate;

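	/*
	 * The clock core copies what it needs from @init during
	 * registration, so a stack-local clk_init_data is sufficient here.
	 */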
	struct clk_init_data init = {
		.flags = CLK_GET_RATE_NOCACHE,
		.num_parents = 0,
		.ops = &scmi_clk_ops,
		.name = sclk->info->name,
	};

	sclk->hw.init = &init;
	ret = devm_clk_hw_register(dev, &sclk->hw);
	if (ret)
		return ret;

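	/*
	 * For discrete clocks the SCMI clock protocol layer is expected to
	 * hand back the rate list sorted in ascending order, so the first
	 * and last entries give the supported minimum and maximum.
	 */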
	if (sclk->info->rate_discrete) {
		int num_rates = sclk->info->list.num_rates;

		if (num_rates <= 0)
			return -EINVAL;

		min_rate = sclk->info->list.rates[0];
		max_rate = sclk->info->list.rates[num_rates - 1];
	} else {
		min_rate = sclk->info->range.min_rate;
		max_rate = sclk->info->range.max_rate;
	}

	clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
	return ret;
}

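/*
 * Bind to the SCMI clock protocol, discover how many clocks the firmware
 * exposes, register each one with the clock framework and finally publish
 * them all through a single of_clk_hw_onecell_get() provider.
 */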
static int scmi_clocks_probe(struct scmi_device *sdev)
{
	int idx, count, err;
	struct clk_hw **hws;
	struct clk_hw_onecell_data *clk_data;
	struct device *dev = &sdev->dev;
	struct device_node *np = dev->of_node;
	const struct scmi_handle *handle = sdev->handle;
	struct scmi_protocol_handle *ph;

	if (!handle)
		return -ENODEV;

	clk_ops = handle->devm_get_protocol(sdev, SCMI_PROTOCOL_CLOCK, &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);

	count = clk_ops->count_get(ph);
	if (count < 0) {
		dev_err(dev, "%pOFn: invalid clock output count\n", np);
		return -EINVAL;
	}

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	clk_data->num = count;
	hws = clk_data->hws;

	for (idx = 0; idx < count; idx++) {
		struct scmi_clk *sclk;

		sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
		if (!sclk)
			return -ENOMEM;

		sclk->info = clk_ops->info_get(ph, idx);
		if (!sclk->info) {
			dev_dbg(dev, "invalid clock info for idx %d\n", idx);
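			/*
			 * hws[idx] stays NULL (the array was zero-allocated),
			 * so this index simply exposes no clock to consumers.
			 */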
			continue;
		}

		sclk->id = idx;
		sclk->ph = ph;

		err = scmi_clk_ops_init(dev, sclk);
		if (err) {
			dev_err(dev, "failed to register clock %d\n", idx);
			devm_kfree(dev, sclk);
			hws[idx] = NULL;
		} else {
			dev_dbg(dev, "Registered clock:%s\n", sclk->info->name);
			hws[idx] = &sclk->hw;
		}
	}

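	/*
	 * Expose the clocks through a one-cell provider: DT consumers
	 * reference a clock by its SCMI clock index.
	 */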
	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   clk_data);
}

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_clocks_driver = {
	.name = "scmi-clocks",
	.probe = scmi_clocks_probe,
	.id_table = scmi_id_table,
};
module_scmi_driver(scmi_clocks_driver);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI clock driver");
MODULE_LICENSE("GPL v2");