// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), that is found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap. A new global partition is created from the global heap
 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
 * set by the bootloader.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 * platforms.
 */
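
/*
 * A rough sketch of the main smem region as described above (illustrative
 * only and not to scale - the exact layout is owned by the bootloader):
 *
 *	+------------------------------+  region base
 *	| smem_header (TOC, versions)  |
 *	+------------------------------+
 *	| global heap items and/or     |
 *	| private partitions           |
 *	+------------------------------+  region end - 4kB
 *	| smem_ptable (optional)       |
 *	+------------------------------+  region end
 */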

/*
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC. We verify that the boot loader
 * version is a valid version as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12
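
/*
 * As the probe path below shows, the major version lives in the upper 16
 * bits of the version word, so e.g. a raw value of 0x000c0000 (a made-up
 * example, not taken from any specific platform) denotes major version 12,
 * SMEM_GLOBAL_PART_VERSION.
 */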

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		11

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc
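
/*
 * Because bits 1:0 of @aux_base are reserved, readers mask them off before
 * comparing against a region's base, as qcom_smem_get_global() below does:
 *
 *	u32 aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
 */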

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @cacheline:	alignment for "cached" entries
 * @reserved:	reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	for now reserved entries
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	for now reserved entry
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5
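
/*
 * Resulting layout of one private partition, as implied by the helpers
 * below (illustrative only, not to scale):
 *
 *	+----------------------------+  partition base
 *	| smem_partition_header      |
 *	+----------------------------+
 *	| uncached items: header,    |  allocated upwards from the
 *	| padding_hdr bytes, data    |  partition header
 *	+----------------------------+  offset_free_uncached
 *	|         free space         |
 *	+----------------------------+  offset_free_cached
 *	| cached items: data, then a |  allocated downwards from the
 *	| cacheline-aligned header   |  partition end
 *	+----------------------------+  partition base + size
 */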

/**
 * struct smem_info - smem region info located after the table of contents
 * @magic:	magic number, must be SMEM_INFO_MAGIC
 * @size:	size of the smem region
 * @base_addr:	base address of the smem region
 * @reserved:	for now reserved entry
 * @num_items:	highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @global_partition:	pointer to global partition when in use
 * @global_cacheline:	cacheline size for global partition
 * @partitions:	list of pointers to partitions affecting the current
 *		processor/host
 * @cacheline:	list of cacheline sizes for each host
 * @item_count: max accepted item number
 * @socinfo:	platform device pointer for the socinfo device
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	struct smem_partition_header *global_partition;
	size_t global_cacheline;
	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
	size_t cacheline[SMEM_HOST_COUNT];
	u32 item_count;
	struct platform_device *socinfo;

	unsigned num_regions;
	struct smem_region regions[];
};

static void *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header *phdr,
			   size_t cacheline)
{
	void *p = phdr;
	struct smem_private_entry *e;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *
phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   unsigned item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	size_t alloc_size;
	void *cached;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size > cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
bad_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return -EINVAL;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL(qcom_smem_alloc);
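
/*
 * A minimal caller sketch (the host, item number and payload type here are
 * hypothetical): since -EEXIST only means the item has already been carved
 * out, callers commonly treat it as success and proceed to look the item up:
 *
 *	ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM, sizeof(struct foo));
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 */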

static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *region;
	struct smem_global_entry *entry;
	u32 aux_base;
	unsigned i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		region = &smem->regions[i];

		if (region->aux_base == aux_base || !aux_base) {
			if (size != NULL)
				*size = le32_to_cpu(entry->size);
			return region->virt_base + le32_to_cpu(entry->offset);
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   size_t cacheline,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return uncached_entry_to_item(e);
		}

		e = uncached_entry_next(e);
	}

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return cached_entry_to_item(e);
		}

		e = cached_entry_next(e, cacheline);
	}

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve pointer and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up smem item and returns pointer to it. Size of smem
 * item is returned in @size.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	size_t cacheln;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ERR_PTR(ret);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		cacheln = __smem->cacheline[host];
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		cacheln = __smem->global_cacheline;
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ptr;
}
EXPORT_SYMBOL(qcom_smem_get);
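
/*
 * The returned value follows the error-pointer convention, so it must be
 * checked with IS_ERR() rather than against NULL. A hypothetical caller:
 *
 *	size_t size;
 *	void *ptr;
 *
 *	ptr = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 */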

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);
	}

	return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);
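
/*
 * For a partition this is simply the gap between offset_free_cached and
 * offset_free_uncached. A hypothetical client could thus detect that new
 * allocations have happened by comparing successive readings:
 *
 *	free = qcom_smem_get_free_space(remote_host);
 *	if (free != last_free)
 *		rescan_items();		(hypothetical helper)
 */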

/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 * @p:	the virtual address to convert
 *
 * Returns 0 if the pointer provided is not within any smem region.
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
	unsigned i;

	for (i = 0; i < __smem->num_regions; i++) {
		struct smem_region *region = &__smem->regions[i];

		if (p < region->virt_base)
			continue;
		if (p < region->virt_base + region->size) {
			u64 offset = p - region->virt_base;

			return (phys_addr_t)region->aux_base + offset;
		}
	}

	return 0;
}
EXPORT_SYMBOL(qcom_smem_virt_to_phys);
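
/*
 * Example (assuming @ptr was obtained from qcom_smem_get()): a zero return
 * means the pointer lies outside every smem region and should be treated
 * as a failure.
 *
 *	phys_addr_t phys = qcom_smem_virt_to_phys(ptr);
 *	if (!phys)
 *		return -EINVAL;
 */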

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	struct smem_header *header;
	__le32 *versions;

	header = smem->regions[0].virt_base;
	versions = header->version;

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n", version);
		return ERR_PTR(-EINVAL);
	}
	return ptable;
}

static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	struct smem_info *info;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR_OR_NULL(ptable))
		return SMEM_ITEM_COUNT;

	/* num_entries is little-endian on the wire, swap before indexing */
	info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		return SMEM_ITEM_COUNT;

	return le16_to_cpu(info->num_items);
}

/*
 * Validate the partition header for a partition whose partition
 * table entry is supplied. Returns a pointer to its header if
 * valid, or a null pointer otherwise.
 */
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
		struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
	struct smem_partition_header *header;
	u32 size;

	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "bad partition magic %02x %02x %02x %02x\n",
			header->magic[0], header->magic[1],
			header->magic[2], header->magic[3]);
		return NULL;
	}

	if (host0 != le16_to_cpu(header->host0)) {
		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
			host0, le16_to_cpu(header->host0));
		return NULL;
	}
	if (host1 != le16_to_cpu(header->host1)) {
		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
			host1, le16_to_cpu(header->host1));
		return NULL;
	}

	size = le32_to_cpu(header->size);
	if (size != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "bad partition size (%u != %u)\n",
			size, le32_to_cpu(entry->size));
		return NULL;
	}

	if (le32_to_cpu(header->offset_free_uncached) > size) {
		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
			le32_to_cpu(header->offset_free_uncached), size);
		return NULL;
	}

	return header;
}

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	bool found = false;
	int i;

	if (smem->global_partition) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
			continue;

		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	header = qcom_smem_partition_header(smem, entry,
				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
	if (!header)
		return -EINVAL;

	smem->global_partition = header;
	smem->global_cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}

static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned int remote_host;
	u16 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
		if (host0 == local_host)
			remote_host = host1;
		else if (host1 == local_host)
			remote_host = host0;
		else
			continue;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev, "bad host %hu\n", remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host]) {
			dev_err(smem->dev, "duplicate host %hu\n", remote_host);
			return -EINVAL;
		}

		header = qcom_smem_partition_header(smem, entry, host0, host1);
		if (!header)
			return -EINVAL;

		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
				const char *name, int i)
{
	struct device_node *np;
	struct resource r;
	resource_size_t size;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;
	size = resource_size(&r);

	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, size);
	if (!smem->regions[i].virt_base)
		return -ENOMEM;
	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = size;

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct qcom_smem *smem;
	size_t array_size;
	int num_regions;
	int hwlock_id;
	u32 version;
	int ret;

	num_regions = 1;
	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
		num_regions++;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
	if (ret)
		return ret;

	if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
					"qcom,rpm-msg-ram", 1)))
		return ret;

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	version = qcom_smem_get_sbl_version(smem);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		if (hwlock_id != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	__smem = smem;

	smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
						      PLATFORM_DEVID_NONE, NULL,
						      0);
	if (IS_ERR(smem->socinfo))
		dev_dbg(&pdev->dev, "failed to register socinfo device\n");

	return 0;
}

static int qcom_smem_remove(struct platform_device *pdev)
{
	platform_device_unregister(__smem->socinfo);

	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;

	return 0;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit)

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");