// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI helpers for DMA request / controller
 *
 * Based on of-dma.c
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

12*4882a593Smuzhiyun #include <linux/device.h>
13*4882a593Smuzhiyun #include <linux/dma-mapping.h>
14*4882a593Smuzhiyun #include <linux/err.h>
15*4882a593Smuzhiyun #include <linux/module.h>
16*4882a593Smuzhiyun #include <linux/kernel.h>
17*4882a593Smuzhiyun #include <linux/list.h>
18*4882a593Smuzhiyun #include <linux/mutex.h>
19*4882a593Smuzhiyun #include <linux/slab.h>
20*4882a593Smuzhiyun #include <linux/ioport.h>
21*4882a593Smuzhiyun #include <linux/acpi.h>
22*4882a593Smuzhiyun #include <linux/acpi_dma.h>
23*4882a593Smuzhiyun #include <linux/property.h>
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun static LIST_HEAD(acpi_dma_list);
26*4882a593Smuzhiyun static DEFINE_MUTEX(acpi_dma_lock);
27*4882a593Smuzhiyun
/**
 * acpi_dma_parse_resource_group - match device and parse resource group
 * @grp: CSRT resource group
 * @adev: ACPI device to match with
 * @adma: struct acpi_dma of the given DMA controller
 *
 * In order to match a device from DSDT table to the corresponding CSRT device
 * we use MMIO address and IRQ.
 *
 * Return:
 * 1 on success, 0 when no information is available, or appropriate errno value
 * on error.
 */
static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
		struct acpi_device *adev, struct acpi_dma *adma)
{
	const struct acpi_csrt_shared_info *si;
	struct list_head resource_list;
	struct resource_entry *rentry;
	resource_size_t mem = 0, irq = 0;
	int ret;

	/* Only the fixed-layout shared info structure is understood here */
	if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
		return -ENODEV;

	/* Collect the DSDT-provided resources of the candidate device */
	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
	if (ret <= 0)
		return 0;

	/* Remember the last MMIO base and IRQ seen in the resource list */
	list_for_each_entry(rentry, &resource_list, node) {
		if (resource_type(rentry->res) == IORESOURCE_MEM)
			mem = rentry->res->start;
		else if (resource_type(rentry->res) == IORESOURCE_IRQ)
			irq = rentry->res->start;
	}

	acpi_dev_free_resource_list(&resource_list);

	/* Consider initial zero values as resource not found */
	if (mem == 0 && irq == 0)
		return 0;

	/* The shared info immediately follows the group header */
	si = (const struct acpi_csrt_shared_info *)&grp[1];

	/* Match device by MMIO */
	if (si->mmio_base_low != lower_32_bits(mem) ||
	    si->mmio_base_high != upper_32_bits(mem))
		return 0;

	/*
	 * Match device by Linux vIRQ: map the GSI from the CSRT and compare
	 * the resulting virtual IRQ with the one found in the DSDT resources.
	 * NOTE(review): the GSI mapping is not undone on mismatch — presumably
	 * harmless since mappings are refcounted/shared, but worth confirming.
	 */
	ret = acpi_register_gsi(NULL, si->gsi_interrupt, si->interrupt_mode, si->interrupt_polarity);
	if (ret != irq)
		return 0;

	dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
		(char *)&grp->vendor_id, grp->device_id, grp->revision);

	/* Check if the request line range is available */
	if (si->base_request_line == 0 && si->num_handshake_signals == 0)
		return 0;

	/* Set up DMA mask based on value from CSRT */
	ret = dma_coerce_mask_and_coherent(&adev->dev,
			DMA_BIT_MASK(si->dma_address_width));
	if (ret)
		return 0;

	/* Record the inclusive request line range for later translation */
	adma->base_request_line = si->base_request_line;
	adma->end_request_line = si->base_request_line +
				 si->num_handshake_signals - 1;

	dev_dbg(&adev->dev, "request line base: 0x%04x end: 0x%04x\n",
		adma->base_request_line, adma->end_request_line);

	return 1;
}
105*4882a593Smuzhiyun
/**
 * acpi_dma_parse_csrt - parse CSRT to extract additional DMA resources
 * @adev: ACPI device to match with
 * @adma: struct acpi_dma of the given DMA controller
 *
 * CSRT or Core System Resources Table is a proprietary ACPI table
 * introduced by Microsoft. This table can contain devices that are not in
 * the system DSDT table. In particular DMA controllers might be described
 * here.
 *
 * We are using this table to get the request line range of the specific DMA
 * controller to be used later.
 */
static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
{
	struct acpi_csrt_group *grp, *end;
	struct acpi_table_csrt *csrt;
	acpi_status status;
	int ret;

	status = acpi_get_table(ACPI_SIG_CSRT, 0,
				(struct acpi_table_header **)&csrt);
	if (ACPI_FAILURE(status)) {
		/* Absence of the table is normal; only warn on real failures */
		if (status != AE_NOT_FOUND)
			dev_warn(&adev->dev, "failed to get the CSRT table\n");
		return;
	}

	/* Resource groups are laid out back-to-back right after the header */
	grp = (struct acpi_csrt_group *)(csrt + 1);
	end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);

	while (grp < end) {
		ret = acpi_dma_parse_resource_group(grp, adev, adma);
		if (ret < 0) {
			dev_warn(&adev->dev,
				 "error in parsing resource group\n");
			break;
		}

		/* Advance by the group's self-declared length to the next one */
		grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
	}

	acpi_put_table((struct acpi_table_header *)csrt);
}
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun /**
152*4882a593Smuzhiyun * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers
153*4882a593Smuzhiyun * @dev: struct device of DMA controller
154*4882a593Smuzhiyun * @acpi_dma_xlate: translation function which converts a dma specifier
155*4882a593Smuzhiyun * into a dma_chan structure
156*4882a593Smuzhiyun * @data: pointer to controller specific data to be used by
157*4882a593Smuzhiyun * translation function
158*4882a593Smuzhiyun *
159*4882a593Smuzhiyun * Allocated memory should be freed with appropriate acpi_dma_controller_free()
160*4882a593Smuzhiyun * call.
161*4882a593Smuzhiyun *
162*4882a593Smuzhiyun * Return:
163*4882a593Smuzhiyun * 0 on success or appropriate errno value on error.
164*4882a593Smuzhiyun */
acpi_dma_controller_register(struct device * dev,struct dma_chan * (* acpi_dma_xlate)(struct acpi_dma_spec *,struct acpi_dma *),void * data)165*4882a593Smuzhiyun int acpi_dma_controller_register(struct device *dev,
166*4882a593Smuzhiyun struct dma_chan *(*acpi_dma_xlate)
167*4882a593Smuzhiyun (struct acpi_dma_spec *, struct acpi_dma *),
168*4882a593Smuzhiyun void *data)
169*4882a593Smuzhiyun {
170*4882a593Smuzhiyun struct acpi_device *adev;
171*4882a593Smuzhiyun struct acpi_dma *adma;
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun if (!dev || !acpi_dma_xlate)
174*4882a593Smuzhiyun return -EINVAL;
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun /* Check if the device was enumerated by ACPI */
177*4882a593Smuzhiyun adev = ACPI_COMPANION(dev);
178*4882a593Smuzhiyun if (!adev)
179*4882a593Smuzhiyun return -EINVAL;
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun adma = kzalloc(sizeof(*adma), GFP_KERNEL);
182*4882a593Smuzhiyun if (!adma)
183*4882a593Smuzhiyun return -ENOMEM;
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun adma->dev = dev;
186*4882a593Smuzhiyun adma->acpi_dma_xlate = acpi_dma_xlate;
187*4882a593Smuzhiyun adma->data = data;
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun acpi_dma_parse_csrt(adev, adma);
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun /* Now queue acpi_dma controller structure in list */
192*4882a593Smuzhiyun mutex_lock(&acpi_dma_lock);
193*4882a593Smuzhiyun list_add_tail(&adma->dma_controllers, &acpi_dma_list);
194*4882a593Smuzhiyun mutex_unlock(&acpi_dma_lock);
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun return 0;
197*4882a593Smuzhiyun }
198*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(acpi_dma_controller_register);
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun /**
201*4882a593Smuzhiyun * acpi_dma_controller_free - Remove a DMA controller from ACPI DMA helpers list
202*4882a593Smuzhiyun * @dev: struct device of DMA controller
203*4882a593Smuzhiyun *
204*4882a593Smuzhiyun * Memory allocated by acpi_dma_controller_register() is freed here.
205*4882a593Smuzhiyun *
206*4882a593Smuzhiyun * Return:
207*4882a593Smuzhiyun * 0 on success or appropriate errno value on error.
208*4882a593Smuzhiyun */
acpi_dma_controller_free(struct device * dev)209*4882a593Smuzhiyun int acpi_dma_controller_free(struct device *dev)
210*4882a593Smuzhiyun {
211*4882a593Smuzhiyun struct acpi_dma *adma;
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun if (!dev)
214*4882a593Smuzhiyun return -EINVAL;
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun mutex_lock(&acpi_dma_lock);
217*4882a593Smuzhiyun
218*4882a593Smuzhiyun list_for_each_entry(adma, &acpi_dma_list, dma_controllers)
219*4882a593Smuzhiyun if (adma->dev == dev) {
220*4882a593Smuzhiyun list_del(&adma->dma_controllers);
221*4882a593Smuzhiyun mutex_unlock(&acpi_dma_lock);
222*4882a593Smuzhiyun kfree(adma);
223*4882a593Smuzhiyun return 0;
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun mutex_unlock(&acpi_dma_lock);
227*4882a593Smuzhiyun return -ENODEV;
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(acpi_dma_controller_free);
230*4882a593Smuzhiyun
/* devres release callback: unregisters the controller on driver detach. */
static void devm_acpi_dma_release(struct device *dev, void *res)
{
	acpi_dma_controller_free(dev);
}
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun /**
237*4882a593Smuzhiyun * devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register()
238*4882a593Smuzhiyun * @dev: device that is registering this DMA controller
239*4882a593Smuzhiyun * @acpi_dma_xlate: translation function
240*4882a593Smuzhiyun * @data: pointer to controller specific data
241*4882a593Smuzhiyun *
242*4882a593Smuzhiyun * Managed acpi_dma_controller_register(). DMA controller registered by this
243*4882a593Smuzhiyun * function are automatically freed on driver detach. See
244*4882a593Smuzhiyun * acpi_dma_controller_register() for more information.
245*4882a593Smuzhiyun *
246*4882a593Smuzhiyun * Return:
247*4882a593Smuzhiyun * 0 on success or appropriate errno value on error.
248*4882a593Smuzhiyun */
devm_acpi_dma_controller_register(struct device * dev,struct dma_chan * (* acpi_dma_xlate)(struct acpi_dma_spec *,struct acpi_dma *),void * data)249*4882a593Smuzhiyun int devm_acpi_dma_controller_register(struct device *dev,
250*4882a593Smuzhiyun struct dma_chan *(*acpi_dma_xlate)
251*4882a593Smuzhiyun (struct acpi_dma_spec *, struct acpi_dma *),
252*4882a593Smuzhiyun void *data)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun void *res;
255*4882a593Smuzhiyun int ret;
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL);
258*4882a593Smuzhiyun if (!res)
259*4882a593Smuzhiyun return -ENOMEM;
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data);
262*4882a593Smuzhiyun if (ret) {
263*4882a593Smuzhiyun devres_free(res);
264*4882a593Smuzhiyun return ret;
265*4882a593Smuzhiyun }
266*4882a593Smuzhiyun devres_add(dev, res);
267*4882a593Smuzhiyun return 0;
268*4882a593Smuzhiyun }
269*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
270*4882a593Smuzhiyun
271*4882a593Smuzhiyun /**
272*4882a593Smuzhiyun * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free()
273*4882a593Smuzhiyun * @dev: device that is unregistering as DMA controller
274*4882a593Smuzhiyun *
275*4882a593Smuzhiyun * Unregister a DMA controller registered with
276*4882a593Smuzhiyun * devm_acpi_dma_controller_register(). Normally this function will not need to
277*4882a593Smuzhiyun * be called and the resource management code will ensure that the resource is
278*4882a593Smuzhiyun * freed.
279*4882a593Smuzhiyun */
devm_acpi_dma_controller_free(struct device * dev)280*4882a593Smuzhiyun void devm_acpi_dma_controller_free(struct device *dev)
281*4882a593Smuzhiyun {
282*4882a593Smuzhiyun WARN_ON(devres_release(dev, devm_acpi_dma_release, NULL, NULL));
283*4882a593Smuzhiyun }
284*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
285*4882a593Smuzhiyun
286*4882a593Smuzhiyun /**
287*4882a593Smuzhiyun * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function
288*4882a593Smuzhiyun * @adma: struct acpi_dma of DMA controller
289*4882a593Smuzhiyun * @dma_spec: dma specifier to update
290*4882a593Smuzhiyun *
291*4882a593Smuzhiyun * Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource
292*4882a593Smuzhiyun * Descriptor":
293*4882a593Smuzhiyun * DMA Request Line bits is a platform-relative number uniquely
294*4882a593Smuzhiyun * identifying the request line assigned. Request line-to-Controller
295*4882a593Smuzhiyun * mapping is done in a controller-specific OS driver.
296*4882a593Smuzhiyun * That's why we can safely adjust slave_id when the appropriate controller is
297*4882a593Smuzhiyun * found.
298*4882a593Smuzhiyun *
299*4882a593Smuzhiyun * Return:
300*4882a593Smuzhiyun * 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
301*4882a593Smuzhiyun */
acpi_dma_update_dma_spec(struct acpi_dma * adma,struct acpi_dma_spec * dma_spec)302*4882a593Smuzhiyun static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
303*4882a593Smuzhiyun struct acpi_dma_spec *dma_spec)
304*4882a593Smuzhiyun {
305*4882a593Smuzhiyun /* Set link to the DMA controller device */
306*4882a593Smuzhiyun dma_spec->dev = adma->dev;
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun /* Check if the request line range is available */
309*4882a593Smuzhiyun if (adma->base_request_line == 0 && adma->end_request_line == 0)
310*4882a593Smuzhiyun return 0;
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun /* Check if slave_id falls to the range */
313*4882a593Smuzhiyun if (dma_spec->slave_id < adma->base_request_line ||
314*4882a593Smuzhiyun dma_spec->slave_id > adma->end_request_line)
315*4882a593Smuzhiyun return -1;
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun /*
318*4882a593Smuzhiyun * Here we adjust slave_id. It should be a relative number to the base
319*4882a593Smuzhiyun * request line.
320*4882a593Smuzhiyun */
321*4882a593Smuzhiyun dma_spec->slave_id -= adma->base_request_line;
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun return 1;
324*4882a593Smuzhiyun }
325*4882a593Smuzhiyun
/* Cursor state shared with acpi_dma_parse_fixed_dma() during resource walk. */
struct acpi_dma_parser_data {
	struct acpi_dma_spec dma_spec;	/* filled in when @index is reached */
	size_t index;			/* which FixedDMA descriptor to pick */
	size_t n;			/* count of FixedDMA descriptors seen */
};
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun /**
333*4882a593Smuzhiyun * acpi_dma_parse_fixed_dma - Parse FixedDMA ACPI resources to a DMA specifier
334*4882a593Smuzhiyun * @res: struct acpi_resource to get FixedDMA resources from
335*4882a593Smuzhiyun * @data: pointer to a helper struct acpi_dma_parser_data
336*4882a593Smuzhiyun */
acpi_dma_parse_fixed_dma(struct acpi_resource * res,void * data)337*4882a593Smuzhiyun static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
338*4882a593Smuzhiyun {
339*4882a593Smuzhiyun struct acpi_dma_parser_data *pdata = data;
340*4882a593Smuzhiyun
341*4882a593Smuzhiyun if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) {
342*4882a593Smuzhiyun struct acpi_resource_fixed_dma *dma = &res->data.fixed_dma;
343*4882a593Smuzhiyun
344*4882a593Smuzhiyun if (pdata->n++ == pdata->index) {
345*4882a593Smuzhiyun pdata->dma_spec.chan_id = dma->channels;
346*4882a593Smuzhiyun pdata->dma_spec.slave_id = dma->request_lines;
347*4882a593Smuzhiyun }
348*4882a593Smuzhiyun }
349*4882a593Smuzhiyun
350*4882a593Smuzhiyun /* Tell the ACPI core to skip this resource */
351*4882a593Smuzhiyun return 1;
352*4882a593Smuzhiyun }
353*4882a593Smuzhiyun
/**
 * acpi_dma_request_slave_chan_by_index - Get the DMA slave channel
 * @dev: struct device to get DMA request from
 * @index: index of FixedDMA descriptor for @dev
 *
 * Return:
 * Pointer to appropriate dma channel on success or an error pointer.
 */
struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
		size_t index)
{
	struct acpi_dma_parser_data pdata;
	struct acpi_dma_spec *dma_spec = &pdata.dma_spec;
	struct acpi_device *adev = ACPI_COMPANION(dev);
	struct list_head resource_list;
	struct acpi_dma *adma;
	struct dma_chan *chan = NULL;
	int found;
	int ret;

	memset(&pdata, 0, sizeof(pdata));
	pdata.index = index;

	/* Initial values for the request line and channel */
	dma_spec->chan_id = -1;
	dma_spec->slave_id = -1;

	/* Walk the device's resources; the callback fills in dma_spec */
	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
			acpi_dma_parse_fixed_dma, &pdata);
	acpi_dev_free_resource_list(&resource_list);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Still at the sentinel values: no FixedDMA descriptor at @index */
	if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0)
		return ERR_PTR(-ENODEV);

	mutex_lock(&acpi_dma_lock);

	list_for_each_entry(adma, &acpi_dma_list, dma_controllers) {
		/*
		 * We are not going to call translation function if slave_id
		 * doesn't fall to the request range.
		 */
		found = acpi_dma_update_dma_spec(adma, dma_spec);
		if (found < 0)
			continue;
		chan = adma->acpi_dma_xlate(dma_spec, adma);
		/*
		 * Try to get a channel only from the DMA controller that
		 * matches the slave_id. See acpi_dma_update_dma_spec()
		 * description for the details.
		 */
		if (found > 0 || chan)
			break;
	}

	mutex_unlock(&acpi_dma_lock);
	/* No channel yet may mean the controller driver hasn't probed: defer */
	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
415*4882a593Smuzhiyun
416*4882a593Smuzhiyun /**
417*4882a593Smuzhiyun * acpi_dma_request_slave_chan_by_name - Get the DMA slave channel
418*4882a593Smuzhiyun * @dev: struct device to get DMA request from
419*4882a593Smuzhiyun * @name: represents corresponding FixedDMA descriptor for @dev
420*4882a593Smuzhiyun *
421*4882a593Smuzhiyun * In order to support both Device Tree and ACPI in a single driver we
422*4882a593Smuzhiyun * translate the names "tx" and "rx" here based on the most common case where
423*4882a593Smuzhiyun * the first FixedDMA descriptor is TX and second is RX.
424*4882a593Smuzhiyun *
425*4882a593Smuzhiyun * If the device has "dma-names" property the FixedDMA descriptor indices
426*4882a593Smuzhiyun * are retrieved based on those. Otherwise the function falls back using
427*4882a593Smuzhiyun * hardcoded indices.
428*4882a593Smuzhiyun *
429*4882a593Smuzhiyun * Return:
430*4882a593Smuzhiyun * Pointer to appropriate dma channel on success or an error pointer.
431*4882a593Smuzhiyun */
acpi_dma_request_slave_chan_by_name(struct device * dev,const char * name)432*4882a593Smuzhiyun struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
433*4882a593Smuzhiyun const char *name)
434*4882a593Smuzhiyun {
435*4882a593Smuzhiyun int index;
436*4882a593Smuzhiyun
437*4882a593Smuzhiyun index = device_property_match_string(dev, "dma-names", name);
438*4882a593Smuzhiyun if (index < 0) {
439*4882a593Smuzhiyun if (!strcmp(name, "tx"))
440*4882a593Smuzhiyun index = 0;
441*4882a593Smuzhiyun else if (!strcmp(name, "rx"))
442*4882a593Smuzhiyun index = 1;
443*4882a593Smuzhiyun else
444*4882a593Smuzhiyun return ERR_PTR(-ENODEV);
445*4882a593Smuzhiyun }
446*4882a593Smuzhiyun
447*4882a593Smuzhiyun dev_dbg(dev, "Looking for DMA channel \"%s\" at index %d...\n", name, index);
448*4882a593Smuzhiyun return acpi_dma_request_slave_chan_by_index(dev, index);
449*4882a593Smuzhiyun }
450*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
451*4882a593Smuzhiyun
452*4882a593Smuzhiyun /**
453*4882a593Smuzhiyun * acpi_dma_simple_xlate - Simple ACPI DMA engine translation helper
454*4882a593Smuzhiyun * @dma_spec: pointer to ACPI DMA specifier
455*4882a593Smuzhiyun * @adma: pointer to ACPI DMA controller data
456*4882a593Smuzhiyun *
457*4882a593Smuzhiyun * A simple translation function for ACPI based devices. Passes &struct
458*4882a593Smuzhiyun * dma_spec to the DMA controller driver provided filter function.
459*4882a593Smuzhiyun *
460*4882a593Smuzhiyun * Return:
461*4882a593Smuzhiyun * Pointer to the channel if found or %NULL otherwise.
462*4882a593Smuzhiyun */
acpi_dma_simple_xlate(struct acpi_dma_spec * dma_spec,struct acpi_dma * adma)463*4882a593Smuzhiyun struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
464*4882a593Smuzhiyun struct acpi_dma *adma)
465*4882a593Smuzhiyun {
466*4882a593Smuzhiyun struct acpi_dma_filter_info *info = adma->data;
467*4882a593Smuzhiyun
468*4882a593Smuzhiyun if (!info || !info->filter_fn)
469*4882a593Smuzhiyun return NULL;
470*4882a593Smuzhiyun
471*4882a593Smuzhiyun return dma_request_channel(info->dma_cap, info->filter_fn, dma_spec);
472*4882a593Smuzhiyun }
473*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(acpi_dma_simple_xlate);
474