// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Intel Corporation.
 * Lei Chuanhua <Chuanhua.lei@intel.com>
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>

#define RCU_RST_STAT	0x0024
#define RCU_RST_REQ	0x0048

#define REG_OFFSET_MASK		GENMASK(31, 16)
#define BIT_OFFSET_MASK		GENMASK(15, 8)
#define STAT_BIT_OFFSET_MASK	GENMASK(7, 0)

#define to_reset_data(x)	container_of(x, struct intel_reset_data, rcdev)
24*4882a593Smuzhiyun struct intel_reset_soc {
25*4882a593Smuzhiyun bool legacy;
26*4882a593Smuzhiyun u32 reset_cell_count;
27*4882a593Smuzhiyun };
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun struct intel_reset_data {
30*4882a593Smuzhiyun struct reset_controller_dev rcdev;
31*4882a593Smuzhiyun struct notifier_block restart_nb;
32*4882a593Smuzhiyun const struct intel_reset_soc *soc_data;
33*4882a593Smuzhiyun struct regmap *regmap;
34*4882a593Smuzhiyun struct device *dev;
35*4882a593Smuzhiyun u32 reboot_id;
36*4882a593Smuzhiyun };
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun static const struct regmap_config intel_rcu_regmap_config = {
39*4882a593Smuzhiyun .name = "intel-reset",
40*4882a593Smuzhiyun .reg_bits = 32,
41*4882a593Smuzhiyun .reg_stride = 4,
42*4882a593Smuzhiyun .val_bits = 32,
43*4882a593Smuzhiyun .fast_io = true,
44*4882a593Smuzhiyun };
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun /*
47*4882a593Smuzhiyun * Reset status register offset relative to
48*4882a593Smuzhiyun * the reset control register(X) is X + 4
49*4882a593Smuzhiyun */
id_to_reg_and_bit_offsets(struct intel_reset_data * data,unsigned long id,u32 * rst_req,u32 * req_bit,u32 * stat_bit)50*4882a593Smuzhiyun static u32 id_to_reg_and_bit_offsets(struct intel_reset_data *data,
51*4882a593Smuzhiyun unsigned long id, u32 *rst_req,
52*4882a593Smuzhiyun u32 *req_bit, u32 *stat_bit)
53*4882a593Smuzhiyun {
54*4882a593Smuzhiyun *rst_req = FIELD_GET(REG_OFFSET_MASK, id);
55*4882a593Smuzhiyun *req_bit = FIELD_GET(BIT_OFFSET_MASK, id);
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun if (data->soc_data->legacy)
58*4882a593Smuzhiyun *stat_bit = FIELD_GET(STAT_BIT_OFFSET_MASK, id);
59*4882a593Smuzhiyun else
60*4882a593Smuzhiyun *stat_bit = *req_bit;
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun if (data->soc_data->legacy && *rst_req == RCU_RST_REQ)
63*4882a593Smuzhiyun return RCU_RST_STAT;
64*4882a593Smuzhiyun else
65*4882a593Smuzhiyun return *rst_req + 0x4;
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun
intel_set_clr_bits(struct intel_reset_data * data,unsigned long id,bool set)68*4882a593Smuzhiyun static int intel_set_clr_bits(struct intel_reset_data *data, unsigned long id,
69*4882a593Smuzhiyun bool set)
70*4882a593Smuzhiyun {
71*4882a593Smuzhiyun u32 rst_req, req_bit, rst_stat, stat_bit, val;
72*4882a593Smuzhiyun int ret;
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
75*4882a593Smuzhiyun &req_bit, &stat_bit);
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun val = set ? BIT(req_bit) : 0;
78*4882a593Smuzhiyun ret = regmap_update_bits(data->regmap, rst_req, BIT(req_bit), val);
79*4882a593Smuzhiyun if (ret)
80*4882a593Smuzhiyun return ret;
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun return regmap_read_poll_timeout(data->regmap, rst_stat, val,
83*4882a593Smuzhiyun set == !!(val & BIT(stat_bit)), 20,
84*4882a593Smuzhiyun 200);
85*4882a593Smuzhiyun }
86*4882a593Smuzhiyun
intel_assert_device(struct reset_controller_dev * rcdev,unsigned long id)87*4882a593Smuzhiyun static int intel_assert_device(struct reset_controller_dev *rcdev,
88*4882a593Smuzhiyun unsigned long id)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun struct intel_reset_data *data = to_reset_data(rcdev);
91*4882a593Smuzhiyun int ret;
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun ret = intel_set_clr_bits(data, id, true);
94*4882a593Smuzhiyun if (ret)
95*4882a593Smuzhiyun dev_err(data->dev, "Reset assert failed %d\n", ret);
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun return ret;
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun
intel_deassert_device(struct reset_controller_dev * rcdev,unsigned long id)100*4882a593Smuzhiyun static int intel_deassert_device(struct reset_controller_dev *rcdev,
101*4882a593Smuzhiyun unsigned long id)
102*4882a593Smuzhiyun {
103*4882a593Smuzhiyun struct intel_reset_data *data = to_reset_data(rcdev);
104*4882a593Smuzhiyun int ret;
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun ret = intel_set_clr_bits(data, id, false);
107*4882a593Smuzhiyun if (ret)
108*4882a593Smuzhiyun dev_err(data->dev, "Reset deassert failed %d\n", ret);
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun return ret;
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun
intel_reset_status(struct reset_controller_dev * rcdev,unsigned long id)113*4882a593Smuzhiyun static int intel_reset_status(struct reset_controller_dev *rcdev,
114*4882a593Smuzhiyun unsigned long id)
115*4882a593Smuzhiyun {
116*4882a593Smuzhiyun struct intel_reset_data *data = to_reset_data(rcdev);
117*4882a593Smuzhiyun u32 rst_req, req_bit, rst_stat, stat_bit, val;
118*4882a593Smuzhiyun int ret;
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
121*4882a593Smuzhiyun &req_bit, &stat_bit);
122*4882a593Smuzhiyun ret = regmap_read(data->regmap, rst_stat, &val);
123*4882a593Smuzhiyun if (ret)
124*4882a593Smuzhiyun return ret;
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun return !!(val & BIT(stat_bit));
127*4882a593Smuzhiyun }
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun static const struct reset_control_ops intel_reset_ops = {
130*4882a593Smuzhiyun .assert = intel_assert_device,
131*4882a593Smuzhiyun .deassert = intel_deassert_device,
132*4882a593Smuzhiyun .status = intel_reset_status,
133*4882a593Smuzhiyun };
134*4882a593Smuzhiyun
intel_reset_xlate(struct reset_controller_dev * rcdev,const struct of_phandle_args * spec)135*4882a593Smuzhiyun static int intel_reset_xlate(struct reset_controller_dev *rcdev,
136*4882a593Smuzhiyun const struct of_phandle_args *spec)
137*4882a593Smuzhiyun {
138*4882a593Smuzhiyun struct intel_reset_data *data = to_reset_data(rcdev);
139*4882a593Smuzhiyun u32 id;
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun if (spec->args[1] > 31)
142*4882a593Smuzhiyun return -EINVAL;
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun id = FIELD_PREP(REG_OFFSET_MASK, spec->args[0]);
145*4882a593Smuzhiyun id |= FIELD_PREP(BIT_OFFSET_MASK, spec->args[1]);
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun if (data->soc_data->legacy) {
148*4882a593Smuzhiyun if (spec->args[2] > 31)
149*4882a593Smuzhiyun return -EINVAL;
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, spec->args[2]);
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun return id;
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun
intel_reset_restart_handler(struct notifier_block * nb,unsigned long action,void * data)157*4882a593Smuzhiyun static int intel_reset_restart_handler(struct notifier_block *nb,
158*4882a593Smuzhiyun unsigned long action, void *data)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun struct intel_reset_data *reset_data;
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun reset_data = container_of(nb, struct intel_reset_data, restart_nb);
163*4882a593Smuzhiyun intel_assert_device(&reset_data->rcdev, reset_data->reboot_id);
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun return NOTIFY_DONE;
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun
intel_reset_probe(struct platform_device * pdev)168*4882a593Smuzhiyun static int intel_reset_probe(struct platform_device *pdev)
169*4882a593Smuzhiyun {
170*4882a593Smuzhiyun struct device_node *np = pdev->dev.of_node;
171*4882a593Smuzhiyun struct device *dev = &pdev->dev;
172*4882a593Smuzhiyun struct intel_reset_data *data;
173*4882a593Smuzhiyun void __iomem *base;
174*4882a593Smuzhiyun u32 rb_id[3];
175*4882a593Smuzhiyun int ret;
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
178*4882a593Smuzhiyun if (!data)
179*4882a593Smuzhiyun return -ENOMEM;
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun data->soc_data = of_device_get_match_data(dev);
182*4882a593Smuzhiyun if (!data->soc_data)
183*4882a593Smuzhiyun return -ENODEV;
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun base = devm_platform_ioremap_resource(pdev, 0);
186*4882a593Smuzhiyun if (IS_ERR(base))
187*4882a593Smuzhiyun return PTR_ERR(base);
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun data->regmap = devm_regmap_init_mmio(dev, base,
190*4882a593Smuzhiyun &intel_rcu_regmap_config);
191*4882a593Smuzhiyun if (IS_ERR(data->regmap)) {
192*4882a593Smuzhiyun dev_err(dev, "regmap initialization failed\n");
193*4882a593Smuzhiyun return PTR_ERR(data->regmap);
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun ret = device_property_read_u32_array(dev, "intel,global-reset", rb_id,
197*4882a593Smuzhiyun data->soc_data->reset_cell_count);
198*4882a593Smuzhiyun if (ret) {
199*4882a593Smuzhiyun dev_err(dev, "Failed to get global reset offset!\n");
200*4882a593Smuzhiyun return ret;
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun data->dev = dev;
204*4882a593Smuzhiyun data->rcdev.of_node = np;
205*4882a593Smuzhiyun data->rcdev.owner = dev->driver->owner;
206*4882a593Smuzhiyun data->rcdev.ops = &intel_reset_ops;
207*4882a593Smuzhiyun data->rcdev.of_xlate = intel_reset_xlate;
208*4882a593Smuzhiyun data->rcdev.of_reset_n_cells = data->soc_data->reset_cell_count;
209*4882a593Smuzhiyun ret = devm_reset_controller_register(&pdev->dev, &data->rcdev);
210*4882a593Smuzhiyun if (ret)
211*4882a593Smuzhiyun return ret;
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun data->reboot_id = FIELD_PREP(REG_OFFSET_MASK, rb_id[0]);
214*4882a593Smuzhiyun data->reboot_id |= FIELD_PREP(BIT_OFFSET_MASK, rb_id[1]);
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun if (data->soc_data->legacy)
217*4882a593Smuzhiyun data->reboot_id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, rb_id[2]);
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun data->restart_nb.notifier_call = intel_reset_restart_handler;
220*4882a593Smuzhiyun data->restart_nb.priority = 128;
221*4882a593Smuzhiyun register_restart_handler(&data->restart_nb);
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun return 0;
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun static const struct intel_reset_soc xrx200_data = {
227*4882a593Smuzhiyun .legacy = true,
228*4882a593Smuzhiyun .reset_cell_count = 3,
229*4882a593Smuzhiyun };
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun static const struct intel_reset_soc lgm_data = {
232*4882a593Smuzhiyun .legacy = false,
233*4882a593Smuzhiyun .reset_cell_count = 2,
234*4882a593Smuzhiyun };
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun static const struct of_device_id intel_reset_match[] = {
237*4882a593Smuzhiyun { .compatible = "intel,rcu-lgm", .data = &lgm_data },
238*4882a593Smuzhiyun { .compatible = "intel,rcu-xrx200", .data = &xrx200_data },
239*4882a593Smuzhiyun {}
240*4882a593Smuzhiyun };
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun static struct platform_driver intel_reset_driver = {
243*4882a593Smuzhiyun .probe = intel_reset_probe,
244*4882a593Smuzhiyun .driver = {
245*4882a593Smuzhiyun .name = "intel-reset",
246*4882a593Smuzhiyun .of_match_table = intel_reset_match,
247*4882a593Smuzhiyun },
248*4882a593Smuzhiyun };
249*4882a593Smuzhiyun
intel_reset_init(void)250*4882a593Smuzhiyun static int __init intel_reset_init(void)
251*4882a593Smuzhiyun {
252*4882a593Smuzhiyun return platform_driver_register(&intel_reset_driver);
253*4882a593Smuzhiyun }
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun /*
256*4882a593Smuzhiyun * RCU is system core entity which is in Always On Domain whose clocks
257*4882a593Smuzhiyun * or resource initialization happens in system core initialization.
258*4882a593Smuzhiyun * Also, it is required for most of the platform or architecture
259*4882a593Smuzhiyun * specific devices to perform reset operation as part of initialization.
260*4882a593Smuzhiyun * So perform RCU as post core initialization.
261*4882a593Smuzhiyun */
262*4882a593Smuzhiyun postcore_initcall(intel_reset_init);
263