1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
/*
3*4882a593Smuzhiyun * imr.c -- Intel Isolated Memory Region driver
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright(c) 2013 Intel Corporation.
6*4882a593Smuzhiyun * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
7*4882a593Smuzhiyun *
8*4882a593Smuzhiyun * IMR registers define an isolated region of memory that can
9*4882a593Smuzhiyun * be masked to prohibit certain system agents from accessing memory.
10*4882a593Smuzhiyun * When a device behind a masked port performs an access - snooped or
11*4882a593Smuzhiyun * not, an IMR may optionally prevent that transaction from changing
12*4882a593Smuzhiyun * the state of memory or from getting correct data in response to the
13*4882a593Smuzhiyun * operation.
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * Write data will be dropped and reads will return 0xFFFFFFFF, the
16*4882a593Smuzhiyun * system will reset and system BIOS will print out an error message to
17*4882a593Smuzhiyun * inform the user that an IMR has been violated.
18*4882a593Smuzhiyun *
19*4882a593Smuzhiyun * This code is based on the Linux MTRR code and reference code from
20*4882a593Smuzhiyun * Intel's Quark BSP EFI, Linux and grub code.
21*4882a593Smuzhiyun *
22*4882a593Smuzhiyun * See quark-x1000-datasheet.pdf for register definitions.
23*4882a593Smuzhiyun * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
24*4882a593Smuzhiyun */
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #include <asm-generic/sections.h>
29*4882a593Smuzhiyun #include <asm/cpu_device_id.h>
30*4882a593Smuzhiyun #include <asm/imr.h>
31*4882a593Smuzhiyun #include <asm/iosf_mbi.h>
32*4882a593Smuzhiyun #include <asm/io.h>
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun #include <linux/debugfs.h>
35*4882a593Smuzhiyun #include <linux/init.h>
36*4882a593Smuzhiyun #include <linux/mm.h>
37*4882a593Smuzhiyun #include <linux/types.h>
38*4882a593Smuzhiyun
/*
 * struct imr_device - IMR driver state.
 * @init:	true once imr_init() has completed successfully; checked by
 *		the public API before touching hardware.
 * @lock:	serialises all read-modify-write access to the IMR registers.
 * @max_imr:	number of IMRs provided by this SoC.
 * @reg_base:	IOSF-MBI register offset of the first IMR.
 */
struct imr_device {
	bool init;
	struct mutex lock;
	int max_imr;
	int reg_base;
};

/* Single device instance: only one IMR block exists on Quark X1000. */
static struct imr_device imr_dev;
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun /*
49*4882a593Smuzhiyun * IMR read/write mask control registers.
50*4882a593Smuzhiyun * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
51*4882a593Smuzhiyun * bit definitions.
52*4882a593Smuzhiyun *
 * addr_lo
54*4882a593Smuzhiyun * 31 Lock bit
55*4882a593Smuzhiyun * 30:24 Reserved
56*4882a593Smuzhiyun * 23:2 1 KiB aligned lo address
57*4882a593Smuzhiyun * 1:0 Reserved
58*4882a593Smuzhiyun *
59*4882a593Smuzhiyun * addr_hi
60*4882a593Smuzhiyun * 31:24 Reserved
61*4882a593Smuzhiyun * 23:2 1 KiB aligned hi address
62*4882a593Smuzhiyun * 1:0 Reserved
63*4882a593Smuzhiyun */
/* Lock bit lives in bit 31 of the addr_lo register (see layout above). */
#define IMR_LOCK	BIT(31)

/*
 * struct imr_regs - shadow copy of one IMR's four consecutive registers.
 * @addr_lo: low address register; also carries the IMR_LOCK bit.
 * @addr_hi: high address register.
 * @rmask:   read access mask.
 * @wmask:   write access mask.
 */
struct imr_regs {
	u32 addr_lo;
	u32 addr_hi;
	u32 rmask;
	u32 wmask;
};

/* Number of 32-bit registers per IMR - used to step the IOSF register index. */
#define IMR_NUM_REGS	(sizeof(struct imr_regs)/sizeof(u32))
/* Address fields store physical addresses shifted right by IMR_SHIFT bits. */
#define IMR_SHIFT	8
#define imr_to_phys(x)	((x) << IMR_SHIFT)
#define phys_to_imr(x)	((x) >> IMR_SHIFT)
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun /**
79*4882a593Smuzhiyun * imr_is_enabled - true if an IMR is enabled false otherwise.
80*4882a593Smuzhiyun *
81*4882a593Smuzhiyun * Determines if an IMR is enabled based on address range and read/write
82*4882a593Smuzhiyun * mask. An IMR set with an address range set to zero and a read/write
83*4882a593Smuzhiyun * access mask set to all is considered to be disabled. An IMR in any
84*4882a593Smuzhiyun * other state - for example set to zero but without read/write access
85*4882a593Smuzhiyun * all is considered to be enabled. This definition of disabled is how
86*4882a593Smuzhiyun * firmware switches off an IMR and is maintained in kernel for
87*4882a593Smuzhiyun * consistency.
88*4882a593Smuzhiyun *
89*4882a593Smuzhiyun * @imr: pointer to IMR descriptor.
90*4882a593Smuzhiyun * @return: true if IMR enabled false if disabled.
91*4882a593Smuzhiyun */
imr_is_enabled(struct imr_regs * imr)92*4882a593Smuzhiyun static inline int imr_is_enabled(struct imr_regs *imr)
93*4882a593Smuzhiyun {
94*4882a593Smuzhiyun return !(imr->rmask == IMR_READ_ACCESS_ALL &&
95*4882a593Smuzhiyun imr->wmask == IMR_WRITE_ACCESS_ALL &&
96*4882a593Smuzhiyun imr_to_phys(imr->addr_lo) == 0 &&
97*4882a593Smuzhiyun imr_to_phys(imr->addr_hi) == 0);
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun /**
101*4882a593Smuzhiyun * imr_read - read an IMR at a given index.
102*4882a593Smuzhiyun *
103*4882a593Smuzhiyun * Requires caller to hold imr mutex.
104*4882a593Smuzhiyun *
105*4882a593Smuzhiyun * @idev: pointer to imr_device structure.
106*4882a593Smuzhiyun * @imr_id: IMR entry to read.
107*4882a593Smuzhiyun * @imr: IMR structure representing address and access masks.
108*4882a593Smuzhiyun * @return: 0 on success or error code passed from mbi_iosf on failure.
109*4882a593Smuzhiyun */
imr_read(struct imr_device * idev,u32 imr_id,struct imr_regs * imr)110*4882a593Smuzhiyun static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
111*4882a593Smuzhiyun {
112*4882a593Smuzhiyun u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
113*4882a593Smuzhiyun int ret;
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_lo);
116*4882a593Smuzhiyun if (ret)
117*4882a593Smuzhiyun return ret;
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_hi);
120*4882a593Smuzhiyun if (ret)
121*4882a593Smuzhiyun return ret;
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->rmask);
124*4882a593Smuzhiyun if (ret)
125*4882a593Smuzhiyun return ret;
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun return iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->wmask);
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun /**
131*4882a593Smuzhiyun * imr_write - write an IMR at a given index.
132*4882a593Smuzhiyun *
133*4882a593Smuzhiyun * Requires caller to hold imr mutex.
134*4882a593Smuzhiyun * Note lock bits need to be written independently of address bits.
135*4882a593Smuzhiyun *
136*4882a593Smuzhiyun * @idev: pointer to imr_device structure.
137*4882a593Smuzhiyun * @imr_id: IMR entry to write.
138*4882a593Smuzhiyun * @imr: IMR structure representing address and access masks.
139*4882a593Smuzhiyun * @return: 0 on success or error code passed from mbi_iosf on failure.
140*4882a593Smuzhiyun */
imr_write(struct imr_device * idev,u32 imr_id,struct imr_regs * imr)141*4882a593Smuzhiyun static int imr_write(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
142*4882a593Smuzhiyun {
143*4882a593Smuzhiyun unsigned long flags;
144*4882a593Smuzhiyun u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
145*4882a593Smuzhiyun int ret;
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun local_irq_save(flags);
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_lo);
150*4882a593Smuzhiyun if (ret)
151*4882a593Smuzhiyun goto failed;
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_hi);
154*4882a593Smuzhiyun if (ret)
155*4882a593Smuzhiyun goto failed;
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->rmask);
158*4882a593Smuzhiyun if (ret)
159*4882a593Smuzhiyun goto failed;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->wmask);
162*4882a593Smuzhiyun if (ret)
163*4882a593Smuzhiyun goto failed;
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun local_irq_restore(flags);
166*4882a593Smuzhiyun return 0;
167*4882a593Smuzhiyun failed:
168*4882a593Smuzhiyun /*
169*4882a593Smuzhiyun * If writing to the IOSF failed then we're in an unknown state,
170*4882a593Smuzhiyun * likely a very bad state. An IMR in an invalid state will almost
171*4882a593Smuzhiyun * certainly lead to a memory access violation.
172*4882a593Smuzhiyun */
173*4882a593Smuzhiyun local_irq_restore(flags);
174*4882a593Smuzhiyun WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
175*4882a593Smuzhiyun imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun return ret;
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun
180*4882a593Smuzhiyun /**
181*4882a593Smuzhiyun * imr_dbgfs_state_show - print state of IMR registers.
182*4882a593Smuzhiyun *
183*4882a593Smuzhiyun * @s: pointer to seq_file for output.
184*4882a593Smuzhiyun * @unused: unused parameter.
185*4882a593Smuzhiyun * @return: 0 on success or error code passed from mbi_iosf on failure.
186*4882a593Smuzhiyun */
imr_dbgfs_state_show(struct seq_file * s,void * unused)187*4882a593Smuzhiyun static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
188*4882a593Smuzhiyun {
189*4882a593Smuzhiyun phys_addr_t base;
190*4882a593Smuzhiyun phys_addr_t end;
191*4882a593Smuzhiyun int i;
192*4882a593Smuzhiyun struct imr_device *idev = s->private;
193*4882a593Smuzhiyun struct imr_regs imr;
194*4882a593Smuzhiyun size_t size;
195*4882a593Smuzhiyun int ret = -ENODEV;
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun mutex_lock(&idev->lock);
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun for (i = 0; i < idev->max_imr; i++) {
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun ret = imr_read(idev, i, &imr);
202*4882a593Smuzhiyun if (ret)
203*4882a593Smuzhiyun break;
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun /*
206*4882a593Smuzhiyun * Remember to add IMR_ALIGN bytes to size to indicate the
207*4882a593Smuzhiyun * inherent IMR_ALIGN size bytes contained in the masked away
208*4882a593Smuzhiyun * lower ten bits.
209*4882a593Smuzhiyun */
210*4882a593Smuzhiyun if (imr_is_enabled(&imr)) {
211*4882a593Smuzhiyun base = imr_to_phys(imr.addr_lo);
212*4882a593Smuzhiyun end = imr_to_phys(imr.addr_hi) + IMR_MASK;
213*4882a593Smuzhiyun size = end - base + 1;
214*4882a593Smuzhiyun } else {
215*4882a593Smuzhiyun base = 0;
216*4882a593Smuzhiyun end = 0;
217*4882a593Smuzhiyun size = 0;
218*4882a593Smuzhiyun }
219*4882a593Smuzhiyun seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
220*4882a593Smuzhiyun "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
221*4882a593Smuzhiyun &base, &end, size, imr.rmask, imr.wmask,
222*4882a593Smuzhiyun imr_is_enabled(&imr) ? "enabled " : "disabled",
223*4882a593Smuzhiyun imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun mutex_unlock(&idev->lock);
227*4882a593Smuzhiyun return ret;
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(imr_dbgfs_state);
230*4882a593Smuzhiyun
/**
 * imr_debugfs_register - register debugfs hooks.
 *
 * Creates a read-only "imr_state" file in the debugfs root that dumps all
 * IMR registers via imr_dbgfs_state_show(). Errors are intentionally
 * ignored - debugfs is best-effort and the driver works without it.
 *
 * @idev: pointer to imr_device structure.
 */
static void imr_debugfs_register(struct imr_device *idev)
{
	debugfs_create_file("imr_state", 0444, NULL, idev,
			    &imr_dbgfs_state_fops);
}
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun /**
243*4882a593Smuzhiyun * imr_check_params - check passed address range IMR alignment and non-zero size
244*4882a593Smuzhiyun *
245*4882a593Smuzhiyun * @base: base address of intended IMR.
246*4882a593Smuzhiyun * @size: size of intended IMR.
247*4882a593Smuzhiyun * @return: zero on valid range -EINVAL on unaligned base/size.
248*4882a593Smuzhiyun */
imr_check_params(phys_addr_t base,size_t size)249*4882a593Smuzhiyun static int imr_check_params(phys_addr_t base, size_t size)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun if ((base & IMR_MASK) || (size & IMR_MASK)) {
252*4882a593Smuzhiyun pr_err("base %pa size 0x%08zx must align to 1KiB\n",
253*4882a593Smuzhiyun &base, size);
254*4882a593Smuzhiyun return -EINVAL;
255*4882a593Smuzhiyun }
256*4882a593Smuzhiyun if (size == 0)
257*4882a593Smuzhiyun return -EINVAL;
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun return 0;
260*4882a593Smuzhiyun }
261*4882a593Smuzhiyun
/**
 * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
 *
 * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the
 * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
 * as a result.
 *
 * NOTE(review): a size smaller than IMR_ALIGN would wrap around here; callers
 * appear to pass 1 KiB aligned sizes (see imr_check_params()) - confirm.
 *
 * @size: input size bytes.
 * @return: reduced size.
 */
static inline size_t imr_raw_size(size_t size)
{
	return size - IMR_ALIGN;
}
276*4882a593Smuzhiyun
277*4882a593Smuzhiyun /**
278*4882a593Smuzhiyun * imr_address_overlap - detects an address overlap.
279*4882a593Smuzhiyun *
280*4882a593Smuzhiyun * @addr: address to check against an existing IMR.
281*4882a593Smuzhiyun * @imr: imr being checked.
282*4882a593Smuzhiyun * @return: true for overlap false for no overlap.
283*4882a593Smuzhiyun */
imr_address_overlap(phys_addr_t addr,struct imr_regs * imr)284*4882a593Smuzhiyun static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
285*4882a593Smuzhiyun {
286*4882a593Smuzhiyun return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun
/**
 * imr_add_range - add an Isolated Memory Region.
 *
 * Validates the range, rejects the reserved "disabled" register pattern,
 * then - under the imr mutex - scans all IMRs for a free slot while
 * refusing any overlap with an existing enabled IMR, and finally programs
 * the chosen slot. On a failed hardware write the slot is rolled back to
 * the disabled state on a best-effort basis.
 *
 * @base: physical base address of region aligned to 1KiB.
 * @size: physical size of region in bytes must be aligned to 1KiB.
 * @rmask: read access mask.
 * @wmask: write access mask.
 * @return: zero on success or negative value indicating error.
 */
int imr_add_range(phys_addr_t base, size_t size,
		  unsigned int rmask, unsigned int wmask)
{
	phys_addr_t end;
	unsigned int i;
	struct imr_device *idev = &imr_dev;
	struct imr_regs imr;
	size_t raw_size;
	int reg;
	int ret;

	if (WARN_ONCE(idev->init == false, "driver not initialized"))
		return -ENODEV;

	ret = imr_check_params(base, size);
	if (ret)
		return ret;

	/* Tweak the size value: addr_hi implicitly adds IMR_ALIGN bytes. */
	raw_size = imr_raw_size(size);
	end = base + raw_size;

	/*
	 * Check for reserved IMR value common to firmware, kernel and grub
	 * indicating a disabled IMR.
	 */
	imr.addr_lo = phys_to_imr(base);
	imr.addr_hi = phys_to_imr(end);
	imr.rmask = rmask;
	imr.wmask = wmask;
	if (!imr_is_enabled(&imr))
		return -ENOTSUPP;

	mutex_lock(&idev->lock);

	/*
	 * Find a free IMR while checking for an existing overlapping range.
	 * Note there's no restriction in silicon to prevent IMR overlaps.
	 * For the sake of simplicity and ease in defining/debugging an IMR
	 * memory map we exclude IMR overlaps.
	 */
	reg = -1;
	for (i = 0; i < idev->max_imr; i++) {
		ret = imr_read(idev, i, &imr);
		if (ret)
			goto failed;

		/* Find overlap @ base or end of requested range. */
		ret = -EINVAL;
		if (imr_is_enabled(&imr)) {
			if (imr_address_overlap(base, &imr))
				goto failed;
			if (imr_address_overlap(end, &imr))
				goto failed;
		} else {
			/* Remember the last free slot; keep scanning for overlaps. */
			reg = i;
		}
	}

	/* Error out if we have no free IMR entries. */
	if (reg == -1) {
		ret = -ENOMEM;
		goto failed;
	}

	pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n",
		 reg, &base, &end, raw_size, rmask, wmask);

	/* Enable IMR at specified range and access mask. */
	imr.addr_lo = phys_to_imr(base);
	imr.addr_hi = phys_to_imr(end);
	imr.rmask = rmask;
	imr.wmask = wmask;

	ret = imr_write(idev, reg, &imr);
	if (ret < 0) {
		/*
		 * In the highly unlikely event iosf_mbi_write failed
		 * attempt to rollback the IMR setup skipping the trapping
		 * of further IOSF write failures.
		 */
		imr.addr_lo = 0;
		imr.addr_hi = 0;
		imr.rmask = IMR_READ_ACCESS_ALL;
		imr.wmask = IMR_WRITE_ACCESS_ALL;
		imr_write(idev, reg, &imr);
	}
failed:
	mutex_unlock(&idev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(imr_add_range);
390*4882a593Smuzhiyun
/**
 * __imr_remove_range - delete an Isolated Memory Region.
 *
 * This function allows you to delete an IMR by its index specified by reg or
 * by address range specified by base and size respectively. If you specify an
 * index on its own the base and size parameters are ignored.
 * imr_remove_range(0, base, size); delete IMR at index 0 base/size ignored.
 * imr_remove_range(-1, base, size); delete IMR from base to base+size.
 *
 * @reg: imr index to remove.
 * @base: physical base address of region aligned to 1 KiB.
 * @size: physical size of region in bytes aligned to 1 KiB.
 * @return: -EINVAL on invalid range or out or range id
 *          -ENODEV if reg is valid but no IMR exists or is locked
 *          0 on success.
 */
static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
{
	phys_addr_t end;
	bool found = false;
	unsigned int i;
	struct imr_device *idev = &imr_dev;
	struct imr_regs imr;
	size_t raw_size;
	int ret = 0;

	if (WARN_ONCE(idev->init == false, "driver not initialized"))
		return -ENODEV;

	/*
	 * Validate address range if deleting by address, else we are
	 * deleting by index where base and size will be ignored.
	 */
	if (reg == -1) {
		ret = imr_check_params(base, size);
		if (ret)
			return ret;
	}

	/*
	 * Tweak the size value.
	 * NOTE(review): when deleting by index size is 0 so raw_size/end wrap
	 * here; they are then only used by the pr_debug() below - confirm.
	 */
	raw_size = imr_raw_size(size);
	end = base + raw_size;

	mutex_lock(&idev->lock);

	if (reg >= 0) {
		/* If a specific IMR is given try to use it. */
		ret = imr_read(idev, reg, &imr);
		if (ret)
			goto failed;

		/* Only an enabled, unlocked IMR may be torn down. */
		if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
			ret = -ENODEV;
			goto failed;
		}
		found = true;
	} else {
		/* Search for match based on address range. */
		for (i = 0; i < idev->max_imr; i++) {
			ret = imr_read(idev, i, &imr);
			if (ret)
				goto failed;

			if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
				continue;

			/* Require an exact match on both ends of the range. */
			if ((imr_to_phys(imr.addr_lo) == base) &&
			    (imr_to_phys(imr.addr_hi) == end)) {
				found = true;
				reg = i;
				break;
			}
		}
	}

	if (!found) {
		ret = -ENODEV;
		goto failed;
	}

	pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);

	/* Tear down the IMR: write the canonical disabled pattern. */
	imr.addr_lo = 0;
	imr.addr_hi = 0;
	imr.rmask = IMR_READ_ACCESS_ALL;
	imr.wmask = IMR_WRITE_ACCESS_ALL;

	ret = imr_write(idev, reg, &imr);

failed:
	mutex_unlock(&idev->lock);
	return ret;
}
485*4882a593Smuzhiyun
/**
 * imr_remove_range - delete an Isolated Memory Region by address
 *
 * This function allows you to delete an IMR by an address range specified
 * by base and size respectively.
 * imr_remove_range(base, size); delete IMR from base to base+size.
 *
 * @base: physical base address of region aligned to 1 KiB.
 * @size: physical size of region in bytes aligned to 1 KiB.
 * @return: -EINVAL on invalid range
 *          -ENODEV if no matching unlocked IMR exists
 *          0 on success.
 */
int imr_remove_range(phys_addr_t base, size_t size)
{
	return __imr_remove_range(-1, base, size);
}
EXPORT_SYMBOL_GPL(imr_remove_range);
504*4882a593Smuzhiyun
/**
 * imr_clear - delete an Isolated Memory Region by index
 *
 * This function allows you to delete an IMR by the index of the IMR.
 * Useful for initial sanitization of the IMR address map.
 * imr_clear(reg); delete IMR at index reg, base/size are ignored.
 *
 * @reg: imr index to remove.
 * @return: -EINVAL on invalid range or out or range id
 *          -ENODEV if reg is valid but no IMR exists or is locked
 *          0 on success.
 */
static inline int imr_clear(int reg)
{
	return __imr_remove_range(reg, 0, 0);
}
522*4882a593Smuzhiyun
/**
 * imr_fixup_memmap - Tear down IMRs used during bootup.
 *
 * BIOS and Grub both setup IMRs around compressed kernel, initrd memory
 * that need to be removed before the kernel hands out one of the IMR
 * encased addresses to a downstream DMA agent such as the SD or Ethernet.
 * IMRs on Galileo are setup to immediately reset the system on violation.
 * As a result if you're running a root filesystem from SD - you'll need
 * the boot-time IMRs torn down or you'll find seemingly random resets when
 * using your filesystem.
 *
 * @idev: pointer to imr_device structure.
 */
static void __init imr_fixup_memmap(struct imr_device *idev)
{
	phys_addr_t base = virt_to_phys(&_text);
	size_t size = virt_to_phys(&__end_rodata) - base;
	unsigned long start, end;
	int i;
	int ret;

	/* Tear down all existing unlocked IMRs (locked ones fail silently). */
	for (i = 0; i < idev->max_imr; i++)
		imr_clear(i);

	/* Virtual addresses used only for the log messages below. */
	start = (unsigned long)_text;
	end = (unsigned long)__end_rodata - 1;

	/*
	 * Setup an unlocked IMR around the physical extent of the kernel
	 * from the beginning of the .text section to the end of the
	 * .rodata section as one physically contiguous block.
	 *
	 * We don't round up @size since it is already PAGE_SIZE aligned.
	 * See vmlinux.lds.S for details.
	 */
	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
	if (ret < 0) {
		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
		       size / 1024, start, end);
	} else {
		pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
			size / 1024, start, end);
	}

}
570*4882a593Smuzhiyun
/* IMR hardware is only present on the Quark X1000 (family 5). */
static const struct x86_cpu_id imr_ids[] __initconst = {
	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
	{}
};
575*4882a593Smuzhiyun
/**
 * imr_init - entry point for IMR driver.
 *
 * Bails out unless running on a Quark X1000 with IOSF-MBI available, then
 * records the SoC's IMR count and register base, marks the driver
 * initialized, registers the debugfs view and sanitizes the boot-time
 * IMR map.
 *
 * return: -ENODEV for no IMR support 0 if good to go.
 */
static int __init imr_init(void)
{
	struct imr_device *idev = &imr_dev;

	if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
		return -ENODEV;

	idev->max_imr = QUARK_X1000_IMR_MAX;
	idev->reg_base = QUARK_X1000_IMR_REGBASE;
	/* Must be set before imr_fixup_memmap() calls into the public API. */
	idev->init = true;

	mutex_init(&idev->lock);
	imr_debugfs_register(idev);
	imr_fixup_memmap(idev);
	return 0;
}
device_initcall(imr_init);
598