xref: /OK3568_Linux_fs/kernel/drivers/mtd/hyperbus/hbmc-am654.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
// Author: Vignesh Raghavendra <vigneshr@ti.com>

#include <linux/completion.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/hyperbus.h>
#include <linux/mtd/mtd.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>

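/* Maximum number of CFI query reads attempted while verifying calibration */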
#define AM654_HBMC_CALIB_COUNT 25

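/* Per-device state: the DMA read channel, its completion, and the physical base of the flash window */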
struct am654_hbmc_device_priv {
	struct completion rx_dma_complete;
	phys_addr_t device_base;
	struct hyperbus_ctlr *ctlr;
	struct dma_chan *rx_chan;
};

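/* Controller state: the HyperBus controller/device pair and an optional mux */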
struct am654_hbmc_priv {
	struct hyperbus_ctlr ctlr;
	struct hyperbus_device hbdev;
	struct mux_control *mux_ctrl;
};

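/*
 * Verify the read path by putting the flash in CFI query mode and polling
 * for the query signature: 0xF0 is the standard CFI/AMD reset command and
 * 0x98 written to address 0x55 enters query mode. The device is treated
 * as calibrated once five consecutive query reads succeed, out of at most
 * AM654_HBMC_CALIB_COUNT attempts.
 */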
static int am654_hbmc_calibrate(struct hyperbus_device *hbdev)
{
	struct map_info *map = &hbdev->map;
	struct cfi_private cfi;
	int count = AM654_HBMC_CALIB_COUNT;
	int pass_count = 0;
	int ret;

	cfi.interleave = 1;
	cfi.device_type = CFI_DEVICETYPE_X16;
	cfi_send_gen_cmd(0xF0, 0, 0, map, &cfi, cfi.device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, 0, map, &cfi, cfi.device_type, NULL);

	while (count--) {
		ret = cfi_qry_present(map, 0, &cfi);
		if (ret)
			pass_count++;
		else
			pass_count = 0;
		if (pass_count == 5)
			break;
	}

	cfi_qry_mode_off(0, map, &cfi);

	return ret;
}

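/* DMA completion callback: wake the reader waiting in am654_hbmc_dma_read() */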
static void am654_hbmc_dma_callback(void *param)
{
	struct am654_hbmc_device_priv *priv = param;

	complete(&priv->rx_dma_complete);
}

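/*
 * Read from the memory-mapped flash window using a memcpy DMA channel.
 * Buffers DMA cannot safely reach (non-linear or on-stack addresses) are
 * rejected with -EINVAL so the caller can fall back to a CPU copy. The
 * completion timeout scales with the transfer length (len + 1000 ms).
 */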
static int am654_hbmc_dma_read(struct am654_hbmc_device_priv *priv, void *to,
			       unsigned long from, ssize_t len)
{
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_chan *rx_chan = priv->rx_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dst, dma_src;
	dma_cookie_t cookie;
	int ret;

	if (!priv->rx_chan || !virt_addr_valid(to) || object_is_on_stack(to))
		return -EINVAL;

	dma_dst = dma_map_single(rx_chan->device->dev, to, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_chan->device->dev, dma_dst)) {
		dev_dbg(priv->ctlr->dev, "DMA mapping failed\n");
		return -EIO;
	}

	dma_src = priv->device_base + from;
	tx = dmaengine_prep_dma_memcpy(rx_chan, dma_dst, dma_src, len, flags);
	if (!tx) {
		dev_err(priv->ctlr->dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto unmap_dma;
	}

	reinit_completion(&priv->rx_dma_complete);
	tx->callback = am654_hbmc_dma_callback;
	tx->callback_param = priv;
	cookie = dmaengine_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(priv->ctlr->dev, "dma_submit_error %d\n", cookie);
		goto unmap_dma;
	}

	dma_async_issue_pending(rx_chan);
	if (!wait_for_completion_timeout(&priv->rx_dma_complete, msecs_to_jiffies(len + 1000))) {
		dmaengine_terminate_sync(rx_chan);
		dev_err(priv->ctlr->dev, "DMA wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
	}

unmap_dma:
	dma_unmap_single(rx_chan->device->dev, dma_dst, len, DMA_FROM_DEVICE);
	return ret;
}

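/*
 * copy_from handler: use DMA for reads of 1 KiB or more and fall back to
 * a CPU copy via memcpy_fromio() for short transfers or when the DMA
 * read is unavailable or fails.
 */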
static void am654_hbmc_read(struct hyperbus_device *hbdev, void *to,
			    unsigned long from, ssize_t len)
{
	struct am654_hbmc_device_priv *priv = hbdev->priv;

	if (len < SZ_1K || am654_hbmc_dma_read(priv, to, from, len))
		memcpy_fromio(to, hbdev->map.virt + from, len);
}

static const struct hyperbus_ops am654_hbmc_ops = {
	.calibrate = am654_hbmc_calibrate,
	.copy_from = am654_hbmc_read,
};

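/*
 * Request any memcpy-capable DMA channel to accelerate reads from the
 * memory-mapped flash. Running without DMA is not an error; only
 * -EPROBE_DEFER is propagated so the probe can be retried once a DMA
 * provider appears.
 */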
static int am654_hbmc_request_mmap_dma(struct am654_hbmc_device_priv *priv)
{
	struct dma_chan *rx_chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(rx_chan)) {
		if (PTR_ERR(rx_chan) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(priv->ctlr->dev, "No DMA channel available\n");
		return 0;
	}
	priv->rx_chan = rx_chan;
	init_completion(&priv->rx_dma_complete);

	return 0;
}

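/*
 * Probe: map the flash window described by the first child node, select
 * mux state 1 (the HBMC path) when a "mux-controls" property is present,
 * request an optional DMA channel and register the device with the
 * HyperBus core.
 */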
static int am654_hbmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct am654_hbmc_device_priv *dev_priv;
	struct device *dev = &pdev->dev;
	struct am654_hbmc_priv *priv;
	struct resource res;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	priv->hbdev.np = of_get_next_child(np, NULL);
	ret = of_address_to_resource(priv->hbdev.np, 0, &res);
	if (ret)
		return ret;

	if (of_property_read_bool(dev->of_node, "mux-controls")) {
		struct mux_control *control = devm_mux_control_get(dev, NULL);

		if (IS_ERR(control))
			return PTR_ERR(control);

		ret = mux_control_select(control, 1);
		if (ret) {
			dev_err(dev, "Failed to select HBMC mux\n");
			return ret;
		}
		priv->mux_ctrl = control;
	}

	priv->hbdev.map.size = resource_size(&res);
	priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
	if (IS_ERR(priv->hbdev.map.virt))
		return PTR_ERR(priv->hbdev.map.virt);

	priv->ctlr.dev = dev;
	priv->ctlr.ops = &am654_hbmc_ops;
	priv->hbdev.ctlr = &priv->ctlr;

	dev_priv = devm_kzalloc(dev, sizeof(*dev_priv), GFP_KERNEL);
	if (!dev_priv) {
		ret = -ENOMEM;
		goto disable_mux;
	}

	priv->hbdev.priv = dev_priv;
	dev_priv->device_base = res.start;
	dev_priv->ctlr = &priv->ctlr;

	ret = am654_hbmc_request_mmap_dma(dev_priv);
	if (ret)
		goto disable_mux;

	ret = hyperbus_register_device(&priv->hbdev);
	if (ret) {
		dev_err(dev, "failed to register controller\n");
		goto release_dma;
	}

	return 0;
release_dma:
	if (dev_priv->rx_chan)
		dma_release_channel(dev_priv->rx_chan);
disable_mux:
	if (priv->mux_ctrl)
		mux_control_deselect(priv->mux_ctrl);
	return ret;
}

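/* Remove: unregister the device, then release the mux and DMA channel if held */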
static int am654_hbmc_remove(struct platform_device *pdev)
{
	struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
	struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
	int ret;

	ret = hyperbus_unregister_device(&priv->hbdev);
	if (priv->mux_ctrl)
		mux_control_deselect(priv->mux_ctrl);

	if (dev_priv->rx_chan)
		dma_release_channel(dev_priv->rx_chan);

	return ret;
}

static const struct of_device_id am654_hbmc_dt_ids[] = {
	{
		.compatible = "ti,am654-hbmc",
	},
	{ /* end of table */ }
};

MODULE_DEVICE_TABLE(of, am654_hbmc_dt_ids);

static struct platform_driver am654_hbmc_platform_driver = {
	.probe = am654_hbmc_probe,
	.remove = am654_hbmc_remove,
	.driver = {
		.name = "hbmc-am654",
		.of_match_table = am654_hbmc_dt_ids,
	},
};

module_platform_driver(am654_hbmc_platform_driver);

MODULE_DESCRIPTION("HBMC driver for AM654 SoC");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:hbmc-am654");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");