// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>

#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "gsi_trans.h"

/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL	cpu_to_le32(0xdeadbeef)

/* SMEM host id representing the modem */
#define QCOM_SMEM_HOST_MODEM	1

/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	dma_addr_t addr = ipa->zero_addr;

	if (!mem->size)
		return;

	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
}

/**
 * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
 * @ipa: IPA pointer
 *
 * Set up the shared memory regions in IPA local memory. This involves
 * zero-filling memory regions, and in the case of header memory, telling
 * the IPA where it's located.
 *
 * This function performs the initial setup of this memory. If the modem
 * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
 *
 * The AP informs the modem where its portions of memory are located
 * in a QMI exchange that occurs at modem startup.
 *
 * Return: 0 if successful, or a negative error code
 */
int ipa_mem_setup(struct ipa *ipa)
{
	dma_addr_t addr = ipa->zero_addr;
	struct gsi_trans *trans;
	u32 offset;
	u16 size;

	/* Get a transaction to define the header memory region and to zero
	 * the processing context and modem memory regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 4);
	if (!trans) {
		dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
		return -EBUSY;
	}

	/* Initialize IPA-local header memory. The modem and AP header
	 * regions are contiguous, and initialized together.
	 */
	offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
	size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
	size += ipa->mem[IPA_MEM_AP_HEADER].size;

	ipa_cmd_hdr_init_local_add(trans, offset, size, addr);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_AP_PROC_CTX]);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);

	gsi_trans_commit_wait(trans);

	/* Tell the hardware where the processing context area is located */
	iowrite32(ipa->mem_offset + offset,
		  ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET);

	return 0;
}

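/**
 * ipa_mem_teardown() - Inverse of ipa_mem_setup()
 * @ipa: IPA pointer
 *
 * The memory regions set up by ipa_mem_setup() require no cleanup when
 * the AP shuts down, so there is currently nothing to do here.
 */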
void ipa_mem_teardown(struct ipa *ipa)
{
	/* Nothing to do */
}

#ifdef IPA_VALIDATE

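/* Validate a region's size, offset alignment, canary space, and bounds */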
static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	const struct ipa_mem *mem = &ipa->mem[mem_id];
	struct device *dev = &ipa->pdev->dev;
	u16 size_multiple;

	/* The modem memory region size must be a multiple of 4 bytes;
	 * all other region sizes must be a multiple of 8 bytes.
	 */
	size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
	if (mem->size % size_multiple)
		dev_err(dev, "region %u size not a multiple of %u bytes\n",
			mem_id, size_multiple);
	else if (mem->offset % 8)
		dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
	else if (mem->offset < mem->canary_count * sizeof(__le32))
		dev_err(dev, "region %u offset too small for %hu canaries\n",
			mem_id, mem->canary_count);
	else if (mem->offset + mem->size > ipa->mem_size)
		dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
			mem_id, ipa->mem_size);
	else
		return true;

	return false;
}

#else /* !IPA_VALIDATE */

static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/**
 * ipa_mem_config() - Configure IPA shared memory
 * @ipa: IPA pointer
 *
 * Check the advertised location and size of the shared memory area,
 * set up the buffer used for zeroing regions, validate all defined
 * memory regions, and write canary values where required.
 *
 * Return: 0 if successful, or a negative error code
 */
int ipa_mem_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	enum ipa_mem_id mem_id;
	dma_addr_t addr;
	u32 mem_size;
	void *virt;
	u32 val;

	/* Check the advertised location and size of the shared memory area */
	val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);

	/* The fields in the register are in 8-byte units */
	ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
	/* Make sure the end is within the region's mapped space */
	mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);
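	/* As a worked example, if the SIZE field of the register holds
	 * 0x0020, the advertised memory size is 8 * 0x0020 = 256 bytes.
	 * (The exact field layout is defined by the FMASK values in
	 * "ipa_reg.h".)
	 */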

	/* If the sizes don't match, issue a warning */
	if (ipa->mem_offset + mem_size > ipa->mem_size) {
		dev_warn(dev, "ignoring larger reported memory size: 0x%08x\n",
			 mem_size);
	} else if (ipa->mem_offset + mem_size < ipa->mem_size) {
		dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
			 mem_size);
		ipa->mem_size = mem_size;
	}

	/* Prealloc DMA memory for zeroing regions */
	virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	ipa->zero_addr = addr;
	ipa->zero_virt = virt;
	ipa->zero_size = IPA_MEM_MAX;

	/* Verify each defined memory region is valid, and if indicated
	 * for the region, write "canary" values in the space prior to
	 * the region's base address.
	 */
	for (mem_id = 0; mem_id < ipa->mem_count; mem_id++) {
		const struct ipa_mem *mem = &ipa->mem[mem_id];
		u16 canary_count;
		__le32 *canary;

		/* Validate all regions (even undefined ones) */
		if (!ipa_mem_valid(ipa, mem_id))
			goto err_dma_free;

		/* Skip over undefined regions */
		if (!mem->offset && !mem->size)
			continue;

		canary_count = mem->canary_count;
		if (!canary_count)
			continue;

		/* Write canary values in the space before the region */
		canary = ipa->mem_virt + ipa->mem_offset + mem->offset;
		do
			*--canary = IPA_MEM_CANARY_VAL;
		while (--canary_count);
	}

	/* Make sure filter and route table memory regions are valid */
	if (!ipa_table_valid(ipa))
		goto err_dma_free;

	/* Validate memory-related properties relevant to immediate commands */
	if (!ipa_cmd_data_valid(ipa))
		goto err_dma_free;

	/* Verify the microcontroller ring alignment (0 is OK too) */
	if (ipa->mem[IPA_MEM_UC_EVENT_RING].offset % 1024) {
		dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
		goto err_dma_free;
	}

	return 0;

err_dma_free:
	dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);

	return -EINVAL;
}

/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;

	dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
	ipa->zero_size = 0;
	ipa->zero_virt = NULL;
	ipa->zero_addr = 0;
}

/**
 * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
 * @ipa: IPA pointer
 *
 * Zero regions of IPA-local memory used by the modem. These are configured
 * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
 * restarts via SSR we need to re-initialize them. A QMI message tells the
 * modem where to find the regions of IPA local memory it needs to know
 * about (including these).
 *
 * Return: 0 if successful, or a negative error code
 */
int ipa_mem_zero_modem(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* Get a transaction to zero the modem memory, modem header,
	 * and modem processing context regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 3);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to zero modem memory\n");
		return -EBUSY;
	}

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_HEADER]);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);

	gsi_trans_commit_wait(trans);

	return 0;
}

/**
 * ipa_imem_init() - Initialize IMEM memory used by the IPA
 * @ipa: IPA pointer
 * @addr: Physical address of the IPA region in IMEM
 * @size: Size (bytes) of the IPA region in IMEM
 *
 * IMEM is a block of shared memory separate from system DRAM, and
 * a portion of this memory is available for the IPA to use. The
 * modem accesses this memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If this region exists (size > 0) we map it for read/write access
 * through the IOMMU using the IPA device.
 *
 * Note: @addr and @size are not guaranteed to be page-aligned.
 *
 * Return: 0 if successful, or a negative error code
 */
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	int ret;

	if (!size)
		return 0;	/* IMEM memory not used */

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for IMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to page boundaries */
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	ipa->imem_iova = iova;
	ipa->imem_size = size;

	return 0;
}

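/* Inverse of ipa_imem_init(); unmap the IMEM region from the IPA's
 * IOMMU domain and forget its location.
 */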
static void ipa_imem_exit(struct ipa *ipa)
{
	struct iommu_domain *domain;
	struct device *dev;

	if (!ipa->imem_size)
		return;

	dev = &ipa->pdev->dev;
	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
		if (size != ipa->imem_size)
			dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
				 size, ipa->imem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
	}

	ipa->imem_size = 0;
	ipa->imem_iova = 0;
}

/**
 * ipa_smem_init() - Initialize SMEM memory used by the IPA
 * @ipa: IPA pointer
 * @item: Item ID of SMEM memory
 * @size: Size (bytes) of SMEM memory region
 *
 * SMEM is a managed block of shared DRAM, from which numbered "items"
 * can be allocated. One item is designated for use by the IPA.
 *
 * The modem accesses SMEM memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If the size provided is non-zero, we allocate the item and map it
 * for access through the IOMMU.
 *
 * Note: @size and the item address are not guaranteed to be page-aligned.
 *
 * Return: 0 if successful, or a negative error code
 */
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	phys_addr_t addr;
	size_t actual;
	void *virt;
	int ret;

	if (!size)
		return 0;	/* SMEM memory not used */

	/* SMEM is memory shared between the AP and another system entity
	 * (in this case, the modem). An allocation from SMEM is persistent
	 * until the AP reboots; there is no way to free an allocated SMEM
	 * region. Allocation only reserves the space; to use it you need
	 * to "get" a pointer to it (this implies no reference counting).
	 * The item might have already been allocated, in which case we
	 * use it unless the size isn't what we expect.
	 */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
			ret, size, item);
		return ret;
	}

	/* Now get the address of the SMEM memory region */
	virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
	if (IS_ERR(virt)) {
		ret = PTR_ERR(virt);
		dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
		return ret;
	}

	/* In case the region was already allocated, verify the size */
	if (ret && actual != size) {
		dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
			item, actual, size);
		return -EINVAL;
	}

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for SMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to a page boundary */
	addr = qcom_smem_virt_to_phys(virt);
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	ipa->smem_iova = iova;
	ipa->smem_size = size;

	return 0;
}

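/* Inverse of ipa_smem_init(); the SMEM item itself cannot be freed,
 * so just unmap the region from the IPA's IOMMU domain.
 */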
static void ipa_smem_exit(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
		if (size != ipa->smem_size)
			dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
				 size, ipa->smem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
	}

	ipa->smem_size = 0;
	ipa->smem_iova = 0;
}

/* Perform memory region-related initialization */
int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
	struct device *dev = &ipa->pdev->dev;
	struct resource *res;
	int ret;

	if (mem_data->local_count > IPA_MEM_COUNT) {
		dev_err(dev, "too many memory regions (%u > %u)\n",
			mem_data->local_count, IPA_MEM_COUNT);
		return -EINVAL;
	}

	ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "error %d setting DMA mask\n", ret);
		return ret;
	}

	res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
					   "ipa-shared");
	if (!res) {
		dev_err(dev,
			"DT error getting \"ipa-shared\" memory property\n");
		return -ENODEV;
	}

	ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
	if (!ipa->mem_virt) {
		dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
		return -ENOMEM;
	}

	ipa->mem_addr = res->start;
	ipa->mem_size = resource_size(res);

	/* The ipa->mem[] array is indexed by enum ipa_mem_id values */
	ipa->mem_count = mem_data->local_count;
	ipa->mem = mem_data->local;

	ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
	if (ret)
		goto err_unmap;

	ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
	if (ret)
		goto err_imem_exit;

	return 0;

err_imem_exit:
	ipa_imem_exit(ipa);
err_unmap:
	memunmap(ipa->mem_virt);

	return ret;
}

/* Inverse of ipa_mem_init() */
void ipa_mem_exit(struct ipa *ipa)
{
	ipa_smem_exit(ipa);
	ipa_imem_exit(ipa);
	memunmap(ipa->mem_virt);
}