// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018-2020 ARM Ltd.
 */

#include <linux/module.h>
#include <linux/sort.h>

#include "common.h"

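/*
 * Clock protocol message IDs used by this driver. The generic
 * PROTOCOL_VERSION/PROTOCOL_ATTRIBUTES/PROTOCOL_MESSAGE_ATTRIBUTES
 * identifiers come from common.h, so only the clock-specific commands
 * are enumerated here.
 */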
enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};

struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define	CLOCK_ENABLE	BIT(0)
	u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_clock_set_config {
	__le32 id;
	__le32 attributes;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

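/*
 * CLOCK_DESCRIBE_RATES response layout: bits [11:0] of num_rates_flags
 * hold the number of rates returned in this reply, bit 12 distinguishes
 * a (min, max, step) triplet from a discrete rate list, and bits [31:16]
 * give the number of rates still to be fetched. Each rate is a 64-bit
 * value split into two little-endian 32-bit words.
 */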
struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[0];
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

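/*
 * Per-instance protocol state stashed via ph->set_priv(): the discovered
 * clock count, the platform's limit on concurrent asynchronous rate
 * requests, a counter of in-flight asynchronous requests, and the array
 * of per-clock descriptors.
 */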
struct clock_info {
	u32 version;
	int num_clocks;
	int max_async_req;
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
};

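/*
 * Query PROTOCOL_ATTRIBUTES to learn how many clocks the platform exposes
 * and how many asynchronous rate-set requests it can have pending at once.
 */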
static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
				   struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
				      0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

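/*
 * Fetch CLOCK_ATTRIBUTES for a single clock. Only the name is kept; on
 * failure the name is cleared so scmi_clock_info_get() can treat the
 * entry as invalid.
 */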
static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
				     u32 clk_id, struct scmi_clock_info *clk)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
	else
		clk->name[0] = '\0';

	ph->xops->xfer_put(ph, t);
	return ret;
}

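/* Ascending u64 comparator for sort() over the discrete rate table. */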
static int rate_cmp_func(const void *_r1, const void *_r2)
{
	const u64 *r1 = _r1, *r2 = _r2;

	if (*r1 < *r2)
		return -1;
	else if (*r1 == *r2)
		return 0;
	else
		return 1;
}

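/*
 * Walk CLOCK_DESCRIBE_RATES until the platform reports no remaining rates.
 * A range-based clock is described by a single (min, max, step) triplet;
 * a discrete clock accumulates up to SCMI_MAX_NUM_RATES entries, which are
 * then sorted into ascending order.
 */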
static int
scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	u64 *rate = NULL;
	int ret, cnt;
	bool rate_discrete = false;
	u32 tot_rate_cnt = 0, rates_flag;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_clock_describe_rates *clk_desc;
	struct scmi_msg_resp_clock_describe_rates *rlist;

	ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES,
				      sizeof(*clk_desc), 0, &t);
	if (ret)
		return ret;

	clk_desc = t->tx.buf;
	rlist = t->rx.buf;

	do {
		clk_desc->id = cpu_to_le32(clk_id);
		/* Set the number of rates to be skipped/already read */
		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

		ret = ph->xops->do_xfer(ph, t);
		if (ret)
			goto err;

		rates_flag = le32_to_cpu(rlist->num_rates_flags);
		num_remaining = NUM_REMAINING(rates_flag);
		rate_discrete = RATE_DISCRETE(rates_flag);
		num_returned = NUM_RETURNED(rates_flag);

		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
			dev_err(ph->dev, "No. of rates > MAX_NUM_RATES");
			break;
		}

		if (!rate_discrete) {
			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
			dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
				clk->range.min_rate, clk->range.max_rate,
				clk->range.step_size);
			break;
		}

		rate = &clk->list.rates[tot_rate_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
			*rate = RATE_TO_U64(rlist->rate[cnt]);
			dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
		}

		tot_rate_cnt += num_returned;

		ph->xops->reset_rx_to_maxsz(ph, t);
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	if (rate_discrete && rate) {
		clk->list.num_rates = tot_rate_cnt;
		sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
		     rate_cmp_func, NULL);
	}

	clk->rate_discrete = rate_discrete;

err:
	ph->xops->xfer_put(ph, t);
	return ret;
}

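/* Read the current rate of a clock as a 64-bit Hz value. */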
static int
scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
		    u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
				      sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

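/*
 * Request a new clock rate. If the platform advertised spare asynchronous
 * slots, the request is issued with CLOCK_SET_ASYNC and completion is
 * awaited via a delayed response; otherwise a plain synchronous transfer
 * is used.
 */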
static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
			       u32 clk_id, u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = ph->get_priv(ph);

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC)
		ret = ph->xops->do_xfer_with_response(ph, t);
	else
		ret = ph->xops->do_xfer(ph, t);

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	ph->xops->xfer_put(ph, t);
	return ret;
}

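/*
 * Set a clock's CLOCK_CONFIG_SET attributes word; within this file only
 * bit 0 (CLOCK_ENABLE) is used, via the enable/disable helpers below.
 */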
static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u32 config)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_config *cfg;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(config);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE);
}

static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, 0);
}

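/*
 * Accessors used by protocol consumers (e.g. the clk-scmi driver) to
 * discover how many clocks exist and to retrieve a clock's static
 * description; a clock whose attributes could not be read has an empty
 * name and is reported as unavailable.
 */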
static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk = ci->clk + clk_id;

	if (!clk->name[0])
		return NULL;

	return clk;
}

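/*
 * Operations exported to SCMI driver users. A minimal consumer sketch,
 * assuming the devm_protocol_get() accessor provided by the SCMI core in
 * this tree (error handling trimmed, names illustrative only):
 *
 *	const struct scmi_clk_proto_ops *ops;
 *	struct scmi_protocol_handle *ph;
 *	u64 rate;
 *
 *	ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
 *	if (!IS_ERR(ops) && ops->count_get(ph) > 0) {
 *		ops->enable(ph, 0);
 *		ops->rate_get(ph, 0, &rate);
 *	}
 */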
static const struct scmi_clk_proto_ops clk_proto_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
};

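/*
 * Instance initialisation: read the protocol version, query the clock
 * count, then enumerate every clock's attributes and supported rates and
 * stash the result as this instance's private data.
 */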
static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	ph->xops->version_get(ph, &version);

	dev_dbg(ph->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	scmi_clock_protocol_attributes_get(ph, cinfo);

	cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(ph, clkid, clk);
		if (!ret)
			scmi_clock_describe_rates_get(ph, clkid, clk);
	}

	cinfo->version = version;
	return ph->set_priv(ph, cinfo);
}

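/*
 * Protocol descriptor handed to the SCMI core; the
 * DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER() macro below generates the
 * registration/unregistration entry points for it.
 */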
static const struct scmi_protocol scmi_clock = {
	.id = SCMI_PROTOCOL_CLOCK,
	.owner = THIS_MODULE,
	.init_instance = &scmi_clock_protocol_init,
	.ops = &clk_proto_ops,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)