xref: /OK3568_Linux_fs/kernel/drivers/base/arm/protected_memory_allocator/protected_memory_allocator.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3  *
4  * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 #include <linux/version.h>
23 #include <linux/of.h>
24 #include <linux/of_reserved_mem.h>
25 #include <linux/platform_device.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <linux/mm.h>
29 #include <linux/io.h>
30 #include <linux/protected_memory_allocator.h>
31 
32 /* Size of a bitfield element in bytes */
33 #define BITFIELD_ELEM_SIZE sizeof(u64)
34 
35 /* We can track whether or not 64 pages are currently allocated in a u64 */
36 #define PAGES_PER_BITFIELD_ELEM (BITFIELD_ELEM_SIZE * BITS_PER_BYTE)
37 
38 /* Order 6 (i.e. 64) corresponds to the number of pages held in one bitfield element */
39 #define ORDER_OF_PAGES_PER_BITFIELD_ELEM 6
40 
41 /**
42  * struct simple_pma_device - Simple implementation of a protected memory
43  *                            allocator device
44  * @pma_dev: Protected memory allocator device pointer
45  * @dev:     Device pointer
46  * @allocated_pages_bitfield_arr: Status of all the physical memory pages within the
47  *                                protected memory region, one bit per page
48  * @rmem_base:      Base address of the reserved memory region
49  * @rmem_size:      Size of the reserved memory region, in pages
50  * @num_free_pages: Number of free pages in the memory region
51  * @rmem_lock:      Lock to serialize the allocation and freeing of
52  *                  physical pages from the protected memory region
53  */
54 struct simple_pma_device {
55 	struct protected_memory_allocator_device pma_dev;
56 	struct device *dev;
57 	u64 *allocated_pages_bitfield_arr;
58 	phys_addr_t rmem_base;
59 	size_t rmem_size;
60 	size_t num_free_pages;
61 	spinlock_t rmem_lock;
62 };
63 
64 /**
65  * ALLOC_PAGES_BITFIELD_ARR_SIZE() - Number of elements in array
66  *                                   'allocated_pages_bitfield_arr'
67  * If the number of pages required does not divide exactly by
68  * PAGES_PER_BITFIELD_ELEM, an extra array element is added for the remainder.
69  * @num_pages: number of pages
70  */
71 #define ALLOC_PAGES_BITFIELD_ARR_SIZE(num_pages) \
72 	((PAGES_PER_BITFIELD_ELEM * (0 != (num_pages % PAGES_PER_BITFIELD_ELEM)) + \
73 	num_pages) / PAGES_PER_BITFIELD_ELEM)
74 
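The macro above is simply a round-up division. As a quick worked check (editorial sketch, not part of the driver; pages_to_bitfield_elems() is a hypothetical equivalent helper):

/* Equivalent to ALLOC_PAGES_BITFIELD_ARR_SIZE(): round num_pages up to a
 * whole number of 64-page bitfield elements.
 * e.g. 100 pages -> (100 + 63) / 64 = 2 elements; 128 pages -> 2 elements.
 */
static inline size_t pages_to_bitfield_elems(size_t num_pages)
{
	return (num_pages + PAGES_PER_BITFIELD_ELEM - 1) / PAGES_PER_BITFIELD_ELEM;
}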
75 /**
76  * small_granularity_alloc() - Allocate 1-32 power-of-two pages.
77  * @epma_dev: protected memory allocator device structure.
78  * @alloc_bitfield_idx: index of the relevant bitfield.
79  * @start_bit: starting bitfield index.
80  * @order: bitshift for number of pages. Order of 0 to 5 equals 1 to 32 pages.
81  * @pma: protected_memory_allocation struct.
82  *
83  * Allocate a power-of-two number of pages, N = 1 << order, where
84  * 0 <= order <= ORDER_OF_PAGES_PER_BITFIELD_ELEM - 1, i.e. up to 32 pages.
85  * The routine fills in a pma structure and sets the appropriate bits in the
86  * allocated-pages bitfield array, but assumes the caller has already
87  * determined that those bits are clear.
88  *
89  * This routine always works within only a single allocated-pages bitfield element.
90  * It can be thought of as the 'small-granularity' allocator.
91  */
92 static void small_granularity_alloc(struct simple_pma_device *const epma_dev,
93 				    size_t alloc_bitfield_idx, size_t start_bit,
94 				    size_t order,
95 				    struct protected_memory_allocation *pma)
96 {
97 	size_t i;
98 	size_t page_idx;
99 	u64 *bitfield;
100 	size_t alloc_pages_bitfield_size;
101 
102 	if (WARN_ON(!epma_dev) ||
103 	    WARN_ON(!pma))
104 		return;
105 
106 	WARN(epma_dev->rmem_size == 0, "%s: rmem_size is 0", __func__);
107 	alloc_pages_bitfield_size = ALLOC_PAGES_BITFIELD_ARR_SIZE(epma_dev->rmem_size);
108 
109 	WARN(alloc_bitfield_idx >= alloc_pages_bitfield_size,
110 	     "%s: idx>bf_size: %zu %zu", __func__,
111 	     alloc_bitfield_idx, alloc_pages_bitfield_size);
112 
113 	WARN((start_bit + (1 << order)) > PAGES_PER_BITFIELD_ELEM,
114 	     "%s: start=%zu order=%zu ppbe=%zu",
115 	     __func__, start_bit, order, PAGES_PER_BITFIELD_ELEM);
116 
117 	bitfield = &epma_dev->allocated_pages_bitfield_arr[alloc_bitfield_idx];
118 
119 	for (i = 0; i < (1 << order); i++) {
120 		/* Check the pages represented by this bit are actually free */
121 		WARN(*bitfield & (1ULL << (start_bit + i)),
122 		      "in %s: page not free: %zu %zu %.16llx %zu\n",
123 		      __func__, i, order, *bitfield, alloc_pages_bitfield_size);
124 
125 		/* Mark the pages as now allocated */
126 		*bitfield |= (1ULL << (start_bit + i));
127 	}
128 
129 	/* Compute the page index */
130 	page_idx = (alloc_bitfield_idx * PAGES_PER_BITFIELD_ELEM) + start_bit;
131 
132 	/* Fill-in the allocation struct for the caller */
133 	pma->pa = epma_dev->rmem_base + (page_idx << PAGE_SHIFT);
134 	pma->order = order;
135 }
136 
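To make the address computation above concrete, a short worked example (editorial; assumes 4 KiB pages, i.e. PAGE_SHIFT == 12):

/*
 * An order-2 request (4 pages) satisfied at alloc_bitfield_idx == 3,
 * start_bit == 8 sets bits 8..11 of bitfield element 3 and computes
 * page_idx = 3 * 64 + 8 = 200, so the caller receives
 * pma->pa = rmem_base + (200 << 12) = rmem_base + 0xC8000, pma->order = 2.
 */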
137 /**
138  * large_granularity_alloc() - Allocate pages at multiples of 64 pages.
139  * @epma_dev: protected memory allocator device structure.
140  * @start_alloc_bitfield_idx: index of the starting bitfield.
141  * @order: bitshift for number of pages. Order of 6+ equals 64+ pages.
142  * @pma: protected_memory_allocation struct.
143  *
144  * Allocate a power-of-two number of pages, N = 1 << order, where
145  * order >= ORDER_OF_PAGES_PER_BITFIELD_ELEM, i.e. 64 pages or more. The routine
146  * fills in a pma structure and sets the appropriate bits in the allocated-pages
147  * bitfield array, but assumes the caller has already determined that those bits are clear.
148  *
149  * Unlike small_granularity_alloc, this routine can work with multiple 64-page groups,
150  * ie multiple elements from the allocated-pages bitfield array. However, it always
151  * works with complete sets of these 64-page groups. It can therefore be thought of
152  * as the 'large-granularity' allocator.
153  */
154 static void large_granularity_alloc(struct simple_pma_device *const epma_dev,
155 				    size_t start_alloc_bitfield_idx,
156 				    size_t order,
157 				    struct protected_memory_allocation *pma)
158 {
159 	size_t i;
160 	size_t num_pages_to_alloc = (size_t)1 << order;
161 	size_t num_bitfield_elements_needed = num_pages_to_alloc / PAGES_PER_BITFIELD_ELEM;
162 	size_t start_page_idx = start_alloc_bitfield_idx * PAGES_PER_BITFIELD_ELEM;
163 
164 	if (WARN_ON(!epma_dev) ||
165 	    WARN_ON(!pma))
166 		return;
167 
168 	/*
169 	 * Are there enough bitfield array elements (groups of 64 pages)
170 	 * between the start element and the end of the bitfield array
171 	 * to fulfill the request?
172 	 */
173 	WARN((start_alloc_bitfield_idx + num_bitfield_elements_needed) > ALLOC_PAGES_BITFIELD_ARR_SIZE(epma_dev->rmem_size),
174 	     "%s: start=%zu order=%zu ms=%zu",
175 	     __func__, start_alloc_bitfield_idx, order, epma_dev->rmem_size);
176 
177 	for (i = 0; i < num_bitfield_elements_needed; i++) {
178 		u64 *bitfield = &epma_dev->allocated_pages_bitfield_arr[start_alloc_bitfield_idx + i];
179 
180 		/* We expect all pages that relate to this bitfield element to be free */
181 		WARN((*bitfield != 0),
182 		     "in %s: pages not free: i=%zu o=%zu bf=%.16llx\n",
183 		     __func__, i, order, *bitfield);
184 
185 		/* Mark all the pages for this element as not free */
186 		*bitfield = ~0ULL;
187 	}
188 
189 	/* Fill-in the allocation struct for the caller */
190 	pma->pa = epma_dev->rmem_base + (start_page_idx  << PAGE_SHIFT);
191 	pma->order = order;
192 }
193 
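A matching worked example for the large-granularity path (editorial; again assuming 4 KiB pages):

/*
 * An order-7 request (128 pages) needs 128 / 64 = 2 bitfield elements.
 * Starting at element 5, elements 5 and 6 are set to ~0ULL,
 * start_page_idx = 5 * 64 = 320, and the caller receives
 * pma->pa = rmem_base + (320 << 12) = rmem_base + 0x140000, pma->order = 7.
 */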
194 static struct protected_memory_allocation *simple_pma_alloc_page(
195 	struct protected_memory_allocator_device *pma_dev, unsigned int order)
196 {
197 	struct simple_pma_device *const epma_dev =
198 		container_of(pma_dev, struct simple_pma_device, pma_dev);
199 	struct protected_memory_allocation *pma;
200 	size_t num_pages_to_alloc;
201 
202 	u64 *bitfields = epma_dev->allocated_pages_bitfield_arr;
203 	size_t i;
204 	size_t bit;
205 	size_t count;
206 
207 	dev_dbg(epma_dev->dev, "%s(pma_dev=%px, order=%u)\n",
208 		__func__, (void *)pma_dev, order);
209 
210 	/* This is an example function that follows an extremely simple logic
211 	 * and is very likely to fail to allocate memory if put under stress.
212 	 *
213 	 * The simple_pma_device maintains an array of u64s, with one bit used
214 	 * to track the status of each page.
215 	 *
216 	 * In order to create a memory allocation, the allocator looks for an
217 	 * adjacent group of cleared bits. This does leave the algorithm open
218 	 * to fragmentation issues, but is deemed sufficient for now.
219 	 * If successful, the allocator shall mark all the pages as allocated
220 	 * and decrement the count of free pages accordingly.
221 	 *
222 	 * Allocations of 64 pages or more (order >= 6) can be made only with
223 	 * 64-page alignment, in order to keep the algorithm as simple as
224 	 * possible. ie, starting from bit 0 of any 64-bit page-allocation
225 	 * bitfield. For this, the large-granularity allocator is utilised.
226 	 *
227 	 * Lower-order allocations can only be made entirely within the
228 	 * same group of 64 pages, with the small-granularity allocator (i.e.
229 	 * always from the same 64-bit page-allocation bitfield) - again, to
230 	 * keep things as simple as possible, but flexible to meet
231 	 * current needs.
232 	 */
233 
234 	num_pages_to_alloc = (size_t)1 << order;
235 
236 	pma = devm_kzalloc(epma_dev->dev, sizeof(*pma), GFP_KERNEL);
237 	if (!pma) {
238 		dev_err(epma_dev->dev, "Failed to alloc pma struct");
239 		return NULL;
240 	}
241 
242 	spin_lock(&epma_dev->rmem_lock);
243 
244 	if (epma_dev->num_free_pages < num_pages_to_alloc) {
245 		dev_err(epma_dev->dev, "not enough free pages\n");
246 		devm_kfree(epma_dev->dev, pma);
247 		spin_unlock(&epma_dev->rmem_lock);
248 		return NULL;
249 	}
250 
251 	/*
252 	 * For order 0-5 (i.e. 1 to 32 pages) we always allocate within the same set of 64 pages.
253 	 * Currently, most allocations will be very small (1 page), so the more likely path
254 	 * here is order < ORDER_OF_PAGES_PER_BITFIELD_ELEM.
255 	 */
256 	if (likely(order < ORDER_OF_PAGES_PER_BITFIELD_ELEM)) {
257 		size_t alloc_pages_bitmap_size = ALLOC_PAGES_BITFIELD_ARR_SIZE(epma_dev->rmem_size);
258 
259 		for (i = 0; i < alloc_pages_bitmap_size; i++) {
260 			count = 0;
261 
262 			for (bit = 0; bit < PAGES_PER_BITFIELD_ELEM; bit++) {
263 				if  (0 == (bitfields[i] & (1ULL << bit))) {
264 					if ((count + 1) >= num_pages_to_alloc) {
265 						/*
266 						 * We've found enough free, consecutive pages with which to
267 						 * make an allocation
268 						 */
269 						small_granularity_alloc(
270 							epma_dev, i,
271 							bit - count, order,
272 							pma);
273 
274 						epma_dev->num_free_pages -=
275 							num_pages_to_alloc;
276 
277 						spin_unlock(
278 							&epma_dev->rmem_lock);
279 						return pma;
280 					}
281 
282 					/* So far so good, but we need more clear bits yet */
283 					count++;
284 				} else {
285 					/*
286 					 * We found an allocated page, so nothing we've seen so far can be used.
287 					 * Keep looking.
288 					 */
289 					count = 0;
290 				}
291 			}
292 		}
293 	} else {
294 		/*
295 		 * For allocations of order ORDER_OF_PAGES_PER_BITFIELD_ELEM and above (>= 64 pages), we know
296 		 * we'll only get allocations for whole groups of 64 pages, which hugely simplifies the task.
297 		 */
298 		size_t alloc_pages_bitmap_size = ALLOC_PAGES_BITFIELD_ARR_SIZE(epma_dev->rmem_size);
299 
300 		/* How many 64-bit bitfield elements will be needed for the allocation? */
301 		size_t num_bitfield_elements_needed = num_pages_to_alloc / PAGES_PER_BITFIELD_ELEM;
302 
303 		count = 0;
304 
305 		for (i = 0; i < alloc_pages_bitmap_size; i++) {
306 			/* Are all the pages free for the i'th u64 bitfield element? */
307 			if (bitfields[i] == 0) {
308 				count += PAGES_PER_BITFIELD_ELEM;
309 
310 				if (count >= (1 << order)) {
311 					size_t start_idx = (i + 1) - num_bitfield_elements_needed;
312 
313 					large_granularity_alloc(epma_dev,
314 								start_idx,
315 								order, pma);
316 
317 					epma_dev->num_free_pages -= 1 << order;
318 					spin_unlock(&epma_dev->rmem_lock);
319 					return pma;
320 				}
321 			} else {
322 				count = 0;
323 			}
324 		}
325 	}
326 
327 	spin_unlock(&epma_dev->rmem_lock);
328 	devm_kfree(epma_dev->dev, pma);
329 
330 	dev_err(epma_dev->dev, "not enough contiguous pages (need %zu), total free pages left %zu\n",
331 		num_pages_to_alloc, epma_dev->num_free_pages);
332 	return NULL;
333 }
334 
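For orientation, a hedged sketch of how a client might drive the three callbacks that probe() below wires into pma_dev->ops; example_use_pma() is a hypothetical caller, not part of this file, and error handling is kept minimal:

/* Hypothetical caller of the allocator ops (editorial sketch). */
static int example_use_pma(struct protected_memory_allocator_device *pma_dev)
{
	/* Request a single protected page (order 0). */
	struct protected_memory_allocation *pma =
		pma_dev->ops.pma_alloc_page(pma_dev, 0);
	phys_addr_t pa;

	if (!pma)
		return -ENOMEM;

	pa = pma_dev->ops.pma_get_phys_addr(pma_dev, pma);
	/* ... program 'pa' into the protected-mode hardware ... */

	pma_dev->ops.pma_free_page(pma_dev, pma);
	return 0;
}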
335 static phys_addr_t simple_pma_get_phys_addr(
336 	struct protected_memory_allocator_device *pma_dev,
337 	struct protected_memory_allocation *pma)
338 {
339 	struct simple_pma_device *const epma_dev =
340 		container_of(pma_dev, struct simple_pma_device, pma_dev);
341 
342 	dev_dbg(epma_dev->dev, "%s(pma_dev=%px, pma=%px, pa=%llx)\n",
343 		__func__, (void *)pma_dev, (void *)pma,
344 		(unsigned long long)pma->pa);
345 
346 	return pma->pa;
347 }
348 
349 static void simple_pma_free_page(
350 	struct protected_memory_allocator_device *pma_dev,
351 	struct protected_memory_allocation *pma)
352 {
353 	struct simple_pma_device *const epma_dev =
354 		container_of(pma_dev, struct simple_pma_device, pma_dev);
355 	size_t num_pages_in_allocation;
356 	size_t offset;
357 	size_t i;
358 	size_t bitfield_idx;
359 	size_t bitfield_start_bit;
360 	size_t page_num;
361 	u64 *bitfield;
362 	size_t alloc_pages_bitmap_size;
363 	size_t num_bitfield_elems_used_by_alloc;
364 
365 	if (WARN_ON(!pma))
366 		return;
367 	dev_dbg(epma_dev->dev, "%s(pma_dev=%px, pma=%px, pa=%llx)\n",
368 		__func__, (void *)pma_dev, (void *)pma,
369 		(unsigned long long)pma->pa);
370 
371 	WARN_ON(pma->pa < epma_dev->rmem_base);
372 
373 	/* This is an example function that follows an extremely simple logic
374 	 * and is vulnerable to abuse.
375 	 */
376 	offset = (pma->pa - epma_dev->rmem_base);
377 	num_pages_in_allocation = (size_t)1 << pma->order;
378 
379 	/* The number of bitfield elements used by the allocation */
380 	num_bitfield_elems_used_by_alloc = num_pages_in_allocation / PAGES_PER_BITFIELD_ELEM;
381 
382 	/* The page number of the first page of the allocation, relative to rmem_base */
383 	page_num = offset >> PAGE_SHIFT;
384 
385 	/* Which u64 bitfield refers to this page? */
386 	bitfield_idx = page_num / PAGES_PER_BITFIELD_ELEM;
387 
388 	alloc_pages_bitmap_size = ALLOC_PAGES_BITFIELD_ARR_SIZE(epma_dev->rmem_size);
389 
390 	/* Is the allocation within expected bounds? */
391 	WARN_ON((bitfield_idx + num_bitfield_elems_used_by_alloc) >= alloc_pages_bitmap_size);
392 
393 	spin_lock(&epma_dev->rmem_lock);
394 
395 	if (pma->order < ORDER_OF_PAGES_PER_BITFIELD_ELEM) {
396 		bitfield = &epma_dev->allocated_pages_bitfield_arr[bitfield_idx];
397 
398 		/* Which bit within that u64 bitfield is the lsb covering this allocation?  */
399 		bitfield_start_bit = page_num % PAGES_PER_BITFIELD_ELEM;
400 
401 		/* Clear the bits for the pages we're now freeing */
402 		*bitfield &= ~(((1ULL << num_pages_in_allocation) - 1) << bitfield_start_bit);
403 	} else {
404 		WARN(page_num % PAGES_PER_BITFIELD_ELEM,
405 		     "%s: Expecting allocs of order >= %d to be %zu-page aligned\n",
406 		     __func__, ORDER_OF_PAGES_PER_BITFIELD_ELEM, PAGES_PER_BITFIELD_ELEM);
407 
408 		for (i = 0; i < num_bitfield_elems_used_by_alloc; i++) {
409 			bitfield = &epma_dev->allocated_pages_bitfield_arr[bitfield_idx + i];
410 
411 			/* We expect all bits to be set (all pages allocated) */
412 			WARN((*bitfield != ~0ULL),
413 			     "%s: alloc being freed is not fully allocated: of=%zu np=%zu bf=%.16llx\n",
414 			     __func__, offset, num_pages_in_allocation, *bitfield);
415 
416 			/*
417 			 * Now clear all the bits in the bitfield element to mark all the pages
418 			 * it refers to as free.
419 			 */
420 			*bitfield = 0ULL;
421 		}
422 	}
423 
424 	epma_dev->num_free_pages += num_pages_in_allocation;
425 	spin_unlock(&epma_dev->rmem_lock);
426 	devm_kfree(epma_dev->dev, pma);
427 }
428 
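The mask arithmetic in the small-order branch above, with concrete numbers (editorial):

/*
 * Freeing an order-2 allocation (4 pages) whose bitfield_start_bit is 8
 * builds the mask ((1ULL << 4) - 1) << 8 = 0xF00, so
 * *bitfield &= ~0xF00 clears exactly bits 8..11 and leaves the state of
 * every other page in that element untouched.
 */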
429 static int protected_memory_allocator_probe(struct platform_device *pdev)
430 {
431 	struct simple_pma_device *epma_dev;
432 	struct device_node *np;
433 	phys_addr_t rmem_base;
434 	size_t rmem_size;
435 	size_t alloc_bitmap_pages_arr_size;
436 #if (KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE)
437 	struct reserved_mem *rmem;
438 #endif
439 
440 	np = pdev->dev.of_node;
441 
442 	if (!np) {
443 		dev_err(&pdev->dev, "device node pointer not set\n");
444 		return -ENODEV;
445 	}
446 
447 	np = of_parse_phandle(np, "memory-region", 0);
448 	if (!np) {
449 		dev_err(&pdev->dev, "memory-region node not set\n");
450 		return -ENODEV;
451 	}
452 
453 #if (KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE)
454 	rmem = of_reserved_mem_lookup(np);
455 	if (rmem) {
456 		rmem_base = rmem->base;
457 		rmem_size = rmem->size >> PAGE_SHIFT;
458 	} else
459 #endif
460 	{
461 		of_node_put(np);
462 		dev_err(&pdev->dev, "could not read reserved memory-region\n");
463 		return -ENODEV;
464 	}
465 
466 	of_node_put(np);
467 	epma_dev = devm_kzalloc(&pdev->dev, sizeof(*epma_dev), GFP_KERNEL);
468 	if (!epma_dev)
469 		return -ENOMEM;
470 
471 	epma_dev->pma_dev.ops.pma_alloc_page = simple_pma_alloc_page;
472 	epma_dev->pma_dev.ops.pma_get_phys_addr = simple_pma_get_phys_addr;
473 	epma_dev->pma_dev.ops.pma_free_page = simple_pma_free_page;
474 	epma_dev->pma_dev.owner = THIS_MODULE;
475 	epma_dev->dev = &pdev->dev;
476 	epma_dev->rmem_base = rmem_base;
477 	epma_dev->rmem_size = rmem_size;
478 	epma_dev->num_free_pages = rmem_size;
479 	spin_lock_init(&epma_dev->rmem_lock);
480 
481 	alloc_bitmap_pages_arr_size = ALLOC_PAGES_BITFIELD_ARR_SIZE(epma_dev->rmem_size);
482 
483 	epma_dev->allocated_pages_bitfield_arr = devm_kzalloc(&pdev->dev,
484 		alloc_bitmap_pages_arr_size * BITFIELD_ELEM_SIZE, GFP_KERNEL);
485 
486 	if (!epma_dev->allocated_pages_bitfield_arr) {
487 		dev_err(&pdev->dev, "failed to allocate resources\n");
488 		devm_kfree(&pdev->dev, epma_dev);
489 		return -ENOMEM;
490 	}
491 
492 	if (epma_dev->rmem_size % PAGES_PER_BITFIELD_ELEM) {
493 		size_t extra_pages =
494 			alloc_bitmap_pages_arr_size * PAGES_PER_BITFIELD_ELEM -
495 			epma_dev->rmem_size;
496 		size_t last_bitfield_index = alloc_bitmap_pages_arr_size - 1;
497 
498 		/* Mark the extra pages (that lie outside the reserved range) as
499 		 * always in use.
500 		 */
501 		epma_dev->allocated_pages_bitfield_arr[last_bitfield_index] =
502 			((1ULL << extra_pages) - 1) <<
503 			(PAGES_PER_BITFIELD_ELEM - extra_pages);
504 	}
505 
506 	platform_set_drvdata(pdev, &epma_dev->pma_dev);
507 	dev_info(&pdev->dev,
508 		"Protected memory allocator probed successfully\n");
509 	dev_info(&pdev->dev, "Protected memory region: base=%llx num pages=%zu\n",
510 		(unsigned long long)rmem_base, rmem_size);
511 
512 	return 0;
513 }
514 
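A worked example of the tail masking performed at the end of probe() (editorial):

/*
 * A 100-page region needs ALLOC_PAGES_BITFIELD_ARR_SIZE(100) = 2 elements
 * (128 bits), so extra_pages = 2 * 64 - 100 = 28. The last element is
 * preset to ((1ULL << 28) - 1) << (64 - 28), i.e. its top 28 bits are set,
 * so pages 100..127 (which lie outside the reserved range) are treated as
 * permanently allocated and can never be handed out.
 */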
515 static int protected_memory_allocator_remove(struct platform_device *pdev)
516 {
517 	struct protected_memory_allocator_device *pma_dev =
518 		platform_get_drvdata(pdev);
519 	struct simple_pma_device *epma_dev;
520 	struct device *dev;
521 
522 	if (!pma_dev)
523 		return -EINVAL;
524 
525 	epma_dev = container_of(pma_dev, struct simple_pma_device, pma_dev);
526 	dev = epma_dev->dev;
527 
528 	if (epma_dev->num_free_pages < epma_dev->rmem_size) {
529 		dev_warn(&pdev->dev, "Leaking %zu pages of protected memory\n",
530 			epma_dev->rmem_size - epma_dev->num_free_pages);
531 	}
532 
533 	platform_set_drvdata(pdev, NULL);
534 	devm_kfree(dev, epma_dev->allocated_pages_bitfield_arr);
535 	devm_kfree(dev, epma_dev);
536 
537 	dev_info(&pdev->dev,
538 		"Protected memory allocator removed successfully\n");
539 
540 	return 0;
541 }
542 
543 static const struct of_device_id protected_memory_allocator_dt_ids[] = {
544 	{ .compatible = "arm,protected-memory-allocator" },
545 	{ /* sentinel */ }
546 };
547 MODULE_DEVICE_TABLE(of, protected_memory_allocator_dt_ids);
548 
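For reference, a hedged sketch of the device-tree wiring this driver expects: a node matching the compatible string above, with a memory-region phandle to a reserved-memory carveout. Node names, addresses and sizes below are purely illustrative:

/*
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		protected_reserved: protected@60000000 {
 *			reg = <0x0 0x60000000 0x0 0x01000000>;
 *			no-map;
 *		};
 *	};
 *
 *	protected_memory_allocator {
 *		compatible = "arm,protected-memory-allocator";
 *		memory-region = <&protected_reserved>;
 *	};
 */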
549 static struct platform_driver protected_memory_allocator_driver = {
550 	.probe = protected_memory_allocator_probe,
551 	.remove = protected_memory_allocator_remove,
552 	.driver = {
553 		.name = "simple_protected_memory_allocator",
554 		.of_match_table = of_match_ptr(protected_memory_allocator_dt_ids),
555 	}
556 };
557 
558 module_platform_driver(protected_memory_allocator_driver);
559 
560 MODULE_LICENSE("GPL");
561 MODULE_AUTHOR("ARM Ltd.");
562 MODULE_VERSION("1.0");
563