xref: /OK3568_Linux_fs/kernel/drivers/interconnect/qcom/bcm-voter.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <asm/div64.h>
7*4882a593Smuzhiyun #include <linux/interconnect-provider.h>
8*4882a593Smuzhiyun #include <linux/list_sort.h>
9*4882a593Smuzhiyun #include <linux/module.h>
10*4882a593Smuzhiyun #include <linux/of.h>
11*4882a593Smuzhiyun #include <linux/platform_device.h>
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <soc/qcom/rpmh.h>
14*4882a593Smuzhiyun #include <soc/qcom/tcs.h>
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include "bcm-voter.h"
17*4882a593Smuzhiyun #include "icc-rpmh.h"
18*4882a593Smuzhiyun 
/* Global registry of all probed BCM voters; guarded by bcm_voter_lock. */
static LIST_HEAD(bcm_voters);
static DEFINE_MUTEX(bcm_voter_lock);

/**
 * struct bcm_voter - Bus Clock Manager voter
 * @dev: reference to the device that communicates with the BCM
 * @np: reference to the device node to match bcm voters
 * @lock: mutex to protect commit and wake/sleep lists in the voter
 * @commit_list: list containing bcms to be committed to hardware
 * @ws_list: list containing bcms that have different wake/sleep votes
 * @voter_node: list of bcm voters
 * @tcs_wait: mask for which buckets require TCS completion
 */
struct bcm_voter {
	struct device *dev;
	struct device_node *np;
	struct mutex lock;
	struct list_head commit_list;
	struct list_head ws_list;
	struct list_head voter_node;
	u32 tcs_wait;	/* one bit per QCOM_ICC_BUCKET_* index, tested via BIT(bucket) */
};
41*4882a593Smuzhiyun 
cmp_vcd(void * priv,struct list_head * a,struct list_head * b)42*4882a593Smuzhiyun static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun 	const struct qcom_icc_bcm *bcm_a =
45*4882a593Smuzhiyun 			list_entry(a, struct qcom_icc_bcm, list);
46*4882a593Smuzhiyun 	const struct qcom_icc_bcm *bcm_b =
47*4882a593Smuzhiyun 			list_entry(b, struct qcom_icc_bcm, list);
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun 	if (bcm_a->aux_data.vcd < bcm_b->aux_data.vcd)
50*4882a593Smuzhiyun 		return -1;
51*4882a593Smuzhiyun 	else if (bcm_a->aux_data.vcd == bcm_b->aux_data.vcd)
52*4882a593Smuzhiyun 		return 0;
53*4882a593Smuzhiyun 	else
54*4882a593Smuzhiyun 		return 1;
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun 
/*
 * Divide @num by @base, but never let a non-zero request round down to
 * zero: even the smallest vote must stay visible.
 */
static u64 bcm_div(u64 num, u32 base)
{
	if (!num)
		return 0;

	if (num < base)
		return 1;

	do_div(num, base);
	return num;
}
67*4882a593Smuzhiyun 
bcm_aggregate(struct qcom_icc_bcm * bcm)68*4882a593Smuzhiyun static void bcm_aggregate(struct qcom_icc_bcm *bcm)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun 	struct qcom_icc_node *node;
71*4882a593Smuzhiyun 	size_t i, bucket;
72*4882a593Smuzhiyun 	u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
73*4882a593Smuzhiyun 	u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
74*4882a593Smuzhiyun 	u64 temp;
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 	for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
77*4882a593Smuzhiyun 		for (i = 0; i < bcm->num_nodes; i++) {
78*4882a593Smuzhiyun 			node = bcm->nodes[i];
79*4882a593Smuzhiyun 			temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,
80*4882a593Smuzhiyun 				       node->buswidth * node->channels);
81*4882a593Smuzhiyun 			agg_avg[bucket] = max(agg_avg[bucket], temp);
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 			temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width,
84*4882a593Smuzhiyun 				       node->buswidth);
85*4882a593Smuzhiyun 			agg_peak[bucket] = max(agg_peak[bucket], temp);
86*4882a593Smuzhiyun 		}
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 		temp = agg_avg[bucket] * bcm->vote_scale;
89*4882a593Smuzhiyun 		bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit);
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 		temp = agg_peak[bucket] * bcm->vote_scale;
92*4882a593Smuzhiyun 		bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
93*4882a593Smuzhiyun 	}
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun 	if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
96*4882a593Smuzhiyun 	    bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
97*4882a593Smuzhiyun 		bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
98*4882a593Smuzhiyun 		bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
99*4882a593Smuzhiyun 		bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
100*4882a593Smuzhiyun 		bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
101*4882a593Smuzhiyun 	}
102*4882a593Smuzhiyun }
103*4882a593Smuzhiyun 
/* Encode one BCM vote pair into a TCS command at @cmd. */
static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
			       u32 addr, bool commit, bool wait)
{
	bool valid;

	if (!cmd)
		return;

	memset(cmd, 0, sizeof(*cmd));

	/* A request with zero bandwidth in both directions is invalid. */
	valid = vote_x || vote_y;

	/* Clamp each vote to the width of its TCS field. */
	vote_x = min_t(u64, vote_x, BCM_TCS_CMD_VOTE_MASK);
	vote_y = min_t(u64, vote_y, BCM_TCS_CMD_VOTE_MASK);

	cmd->addr = addr;
	cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);

	/*
	 * Set the wait for completion flag on command that need to be completed
	 * before the next command.
	 */
	cmd->wait = wait;
}
132*4882a593Smuzhiyun 
/*
 * tcs_list_gen - translate the voter's commit list into TCS commands
 * @voter: voter whose commit_list (already sorted by VCD) is walked
 * @bucket: QCOM_ICC_BUCKET_* index selecting which votes to encode
 * @tcs_list: output array receiving one tcs_cmd per BCM on the list
 * @n: output array of per-batch command counts; cleared on entry, unused
 *     tail entries remain zero (a zero entry terminates the batches)
 */
static void tcs_list_gen(struct bcm_voter *voter, int bucket,
			 struct tcs_cmd tcs_list[MAX_VCD],
			 int n[MAX_VCD + 1])
{
	struct list_head *bcm_list = &voter->commit_list;
	struct qcom_icc_bcm *bcm;
	bool commit, wait;
	size_t idx = 0, batch = 0, cur_vcd_size = 0;

	/* Reset batch counters from any previous invocation. */
	memset(n, 0, sizeof(int) * (MAX_VCD + 1));

	list_for_each_entry(bcm, bcm_list, list) {
		commit = false;
		cur_vcd_size++;
		/* Set the commit bit only on the last BCM of each VCD group. */
		if ((list_is_last(&bcm->list, bcm_list)) ||
		    bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
			commit = true;
			cur_vcd_size = 0;
		}

		/* Wait for completion only if this bucket's bit is set in tcs_wait. */
		wait = commit && (voter->tcs_wait & BIT(bucket));

		tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
			    bcm->vote_y[bucket], bcm->addr, commit, wait);
		idx++;
		n[batch]++;
		/*
		 * Batch the BCMs in such a way that we do not split them in
		 * multiple payloads when they are under the same VCD. This is
		 * to ensure that every BCM is committed since we only set the
		 * commit bit on the last BCM request of every VCD.
		 */
		if (n[batch] >= MAX_RPMH_PAYLOAD) {
			if (!commit) {
				/* Move the partial VCD group into the next batch. */
				n[batch] -= cur_vcd_size;
				n[batch + 1] = cur_vcd_size;
			}
			batch++;
		}
	}
}
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun /**
176*4882a593Smuzhiyun  * of_bcm_voter_get - gets a bcm voter handle from DT node
177*4882a593Smuzhiyun  * @dev: device pointer for the consumer device
178*4882a593Smuzhiyun  * @name: name for the bcm voter device
179*4882a593Smuzhiyun  *
180*4882a593Smuzhiyun  * This function will match a device_node pointer for the phandle
181*4882a593Smuzhiyun  * specified in the device DT and return a bcm_voter handle on success.
182*4882a593Smuzhiyun  *
183*4882a593Smuzhiyun  * Returns bcm_voter pointer or ERR_PTR() on error. EPROBE_DEFER is returned
184*4882a593Smuzhiyun  * when matching bcm voter is yet to be found.
185*4882a593Smuzhiyun  */
of_bcm_voter_get(struct device * dev,const char * name)186*4882a593Smuzhiyun struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
187*4882a593Smuzhiyun {
188*4882a593Smuzhiyun 	struct bcm_voter *voter = ERR_PTR(-EPROBE_DEFER);
189*4882a593Smuzhiyun 	struct bcm_voter *temp;
190*4882a593Smuzhiyun 	struct device_node *np, *node;
191*4882a593Smuzhiyun 	int idx = 0;
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	if (!dev || !dev->of_node)
194*4882a593Smuzhiyun 		return ERR_PTR(-ENODEV);
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	np = dev->of_node;
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	if (name) {
199*4882a593Smuzhiyun 		idx = of_property_match_string(np, "qcom,bcm-voter-names", name);
200*4882a593Smuzhiyun 		if (idx < 0)
201*4882a593Smuzhiyun 			return ERR_PTR(idx);
202*4882a593Smuzhiyun 	}
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun 	node = of_parse_phandle(np, "qcom,bcm-voters", idx);
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	mutex_lock(&bcm_voter_lock);
207*4882a593Smuzhiyun 	list_for_each_entry(temp, &bcm_voters, voter_node) {
208*4882a593Smuzhiyun 		if (temp->np == node) {
209*4882a593Smuzhiyun 			voter = temp;
210*4882a593Smuzhiyun 			break;
211*4882a593Smuzhiyun 		}
212*4882a593Smuzhiyun 	}
213*4882a593Smuzhiyun 	mutex_unlock(&bcm_voter_lock);
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 	of_node_put(node);
216*4882a593Smuzhiyun 	return voter;
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_bcm_voter_get);
219*4882a593Smuzhiyun 
/**
 * qcom_icc_bcm_voter_add - queues up the bcm nodes that require updates
 * @voter: voter that the bcms are being added to
 * @bcm: bcm to add to the commit and wake sleep list
 *
 * A BCM is queued at most once per list: an empty (self-linked) list head
 * means it is not currently on that list.
 */
void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm)
{
	if (!voter)
		return;

	mutex_lock(&voter->lock);
	/* Queue for the next commit unless already pending. */
	if (list_empty(&bcm->list))
		list_add_tail(&bcm->list, &voter->commit_list);

	/* Also track it for wake/sleep delta evaluation at commit time. */
	if (list_empty(&bcm->ws_list))
		list_add_tail(&bcm->ws_list, &voter->ws_list);

	mutex_unlock(&voter->lock);
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_add);
240*4882a593Smuzhiyun 
/**
 * qcom_icc_bcm_voter_commit - generates and commits tcs cmds based on bcms
 * @voter: voter that needs flushing
 *
 * This function generates a set of AMC commands and flushes to the BCM device
 * associated with the voter. It conditionally generate WAKE and SLEEP commands
 * based on deltas between WAKE/SLEEP requirements. The ws_list persists
 * through multiple commit requests and bcm nodes are removed only when the
 * requirements for WAKE matches SLEEP.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
{
	struct qcom_icc_bcm *bcm;
	struct qcom_icc_bcm *bcm_tmp;
	int commit_idx[MAX_VCD + 1];
	struct tcs_cmd cmds[MAX_BCMS];
	int ret = 0;

	/* A NULL voter is treated as "nothing to do", not an error. */
	if (!voter)
		return 0;

	mutex_lock(&voter->lock);

	/* Recompute each queued BCM's votes from its member nodes. */
	list_for_each_entry(bcm, &voter->commit_list, list)
		bcm_aggregate(bcm);

	/*
	 * Pre sort the BCMs based on VCD for ease of generating a command list
	 * that groups the BCMs with the same VCD together. VCDs are numbered
	 * with lowest being the most expensive time wise, ensuring that
	 * those commands are being sent the earliest in the queue. This needs
	 * to be sorted every commit since we can't guarantee the order in which
	 * the BCMs are added to the list.
	 */
	list_sort(NULL, &voter->commit_list, cmp_vcd);

	/*
	 * Construct the command list based on a pre ordered list of BCMs
	 * based on VCD.
	 */
	tcs_list_gen(voter, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
	/* No commands generated: nothing changed, skip the RPMh round-trip. */
	if (!commit_idx[0])
		goto out;

	/* Drop any stale sleep/wake votes before writing the new AMC set. */
	rpmh_invalidate(voter->dev);

	ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE,
			       cmds, commit_idx);
	if (ret) {
		pr_err("Error sending AMC RPMH requests (%d)\n", ret);
		goto out;
	}

	/* AMC votes applied; empty the commit list for the WAKE/SLEEP pass. */
	list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
		list_del_init(&bcm->list);

	list_for_each_entry_safe(bcm, bcm_tmp, &voter->ws_list, ws_list) {
		/*
		 * Only generate WAKE and SLEEP commands if a resource's
		 * requirements change as the execution environment transitions
		 * between different power states.
		 */
		if (bcm->vote_x[QCOM_ICC_BUCKET_WAKE] !=
		    bcm->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
		    bcm->vote_y[QCOM_ICC_BUCKET_WAKE] !=
		    bcm->vote_y[QCOM_ICC_BUCKET_SLEEP])
			list_add_tail(&bcm->list, &voter->commit_list);
		else
			list_del_init(&bcm->ws_list);
	}

	if (list_empty(&voter->commit_list))
		goto out;

	/* Re-sort: the commit list was rebuilt from the ws_list above. */
	list_sort(NULL, &voter->commit_list, cmp_vcd);

	tcs_list_gen(voter, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);

	ret = rpmh_write_batch(voter->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
	if (ret) {
		pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
		goto out;
	}

	tcs_list_gen(voter, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);

	ret = rpmh_write_batch(voter->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
	if (ret) {
		pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
		goto out;
	}

out:
	/* Always leave the commit list empty for the next commit cycle. */
	list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
		list_del_init(&bcm->list);

	mutex_unlock(&voter->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_commit);
342*4882a593Smuzhiyun 
/* Probe one "qcom,bcm-voter" DT node and register it on the global list. */
static int qcom_icc_bcm_voter_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct bcm_voter *voter;

	voter = devm_kzalloc(&pdev->dev, sizeof(*voter), GFP_KERNEL);
	if (!voter)
		return -ENOMEM;

	voter->dev = &pdev->dev;
	voter->np = np;

	/* Default when DT omits "qcom,tcs-wait": only the ACTIVE_ONLY bucket waits. */
	if (of_property_read_u32(np, "qcom,tcs-wait", &voter->tcs_wait))
		voter->tcs_wait = QCOM_ICC_TAG_ACTIVE_ONLY;

	mutex_init(&voter->lock);
	INIT_LIST_HEAD(&voter->commit_list);
	INIT_LIST_HEAD(&voter->ws_list);

	/* Publish the voter so of_bcm_voter_get() can match it by DT node. */
	mutex_lock(&bcm_voter_lock);
	list_add_tail(&voter->voter_node, &bcm_voters);
	mutex_unlock(&bcm_voter_lock);

	return 0;
}
368*4882a593Smuzhiyun 
/* Devicetree match table: one "qcom,bcm-voter" node per voter instance. */
static const struct of_device_id bcm_voter_of_match[] = {
	{ .compatible = "qcom,bcm-voter" },
	{ }
};
MODULE_DEVICE_TABLE(of, bcm_voter_of_match);

static struct platform_driver qcom_icc_bcm_voter_driver = {
	.probe = qcom_icc_bcm_voter_probe,
	.driver = {
		.name		= "bcm_voter",
		.of_match_table = bcm_voter_of_match,
	},
};
module_platform_driver(qcom_icc_bcm_voter_driver);

MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm BCM Voter interconnect driver");
MODULE_LICENSE("GPL v2");
387