// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2019, 2023 NXP
 */
#include <caam_common.h>
#include <caam_hal_ctrl.h>
#include <caam_hal_sm.h>
#include <caam_jr.h>
#include <caam_status.h>
#include <caam_sm.h>
#include <tee_api_defines.h>

/*
 * Secure memory module private data
 */
static struct sm_privdata {
	vaddr_t baseaddr;  /* Secure memory base address */
	vaddr_t ctrl_addr; /* CAAM base address */
	vaddr_t jr_addr;   /* Job Ring base address */
	paddr_t jr_offset; /* Job Ring offset */
} sm_privdata;

enum caam_status caam_sm_alloc(const struct caam_sm_page_desc *page_desc,
			       struct caam_sm_page_addr *page_addr)
{
	enum caam_status ret = CAAM_FAILURE;

	if (!page_desc || !page_addr)
		return CAAM_BAD_PARAM;

	ret = caam_hal_sm_check_page_partition(sm_privdata.jr_addr, page_desc);
	if (ret != CAAM_NO_ERROR) {
		SM_TRACE("Pages %u to %u or partition %u are out of bounds",
			 page_desc->page,
			 page_desc->page + page_desc->page_count - 1,
			 page_desc->partition);
		return ret;
	}

	/* Check if the partition is already allocated */
	if (!caam_hal_sm_prtn_is_free(sm_privdata.jr_addr,
				      page_desc->partition)) {
		SM_TRACE("Partition %u not available", page_desc->partition);
		return CAAM_BUSY;
	}

	/* Open the secure memory partition to all groups */
	caam_hal_sm_open_access_perm(sm_privdata.jr_addr, page_desc->partition);
	caam_hal_sm_set_access_all_group(sm_privdata.jr_addr,
					 page_desc->partition);

	ret = caam_hal_sm_allocate_page(sm_privdata.jr_addr, page_desc);
	if (ret != CAAM_NO_ERROR) {
		SM_TRACE("Error allocating pages %u to %u into partition %u",
			 page_desc->page,
			 page_desc->page + page_desc->page_count - 1,
			 page_desc->partition);

		/* Free any pages allocated before the failure */
		return caam_hal_sm_deallocate_pages(sm_privdata.jr_addr,
						    page_desc);
	}

	page_addr->paddr = caam_hal_ctrl_get_smvaddr(sm_privdata.ctrl_addr,
						     sm_privdata.jr_offset) +
			   caam_hal_sm_get_pages_size(sm_privdata.jr_addr,
						      page_desc->page);
	page_addr->vaddr = sm_privdata.baseaddr +
			   caam_hal_sm_get_pages_size(sm_privdata.jr_addr,
						      page_desc->page);

	SM_TRACE("Partition %u Pages %u to %u allocated @0x%" PRIxVA
		 " (phys @0x%" PRIxPA ")",
		 page_desc->partition, page_desc->page,
		 page_desc->page + page_desc->page_count - 1, page_addr->vaddr,
		 page_addr->paddr);

	return CAAM_NO_ERROR;
}

enum caam_status caam_sm_free(const struct caam_sm_page_desc *page_desc)
{
	enum caam_status ret = CAAM_FAILURE;

	SM_TRACE("Free Secure Memory pages %u to %u from partition %u",
		 page_desc->page, page_desc->page + page_desc->page_count - 1,
		 page_desc->partition);

	/*
	 * De-allocate the partition. This automatically releases the
	 * partition's pages to the pool of available pages. If the partition
	 * is marked as CSP, its pages are zeroized when released. If the
	 * partition is marked as PSP, neither the partition nor its pages
	 * are de-allocated and the de-allocation fails.
	 */
	if (!caam_hal_sm_prtn_is_owned(sm_privdata.jr_addr,
				       page_desc->partition)) {
		SM_TRACE("Partition %u not owned by the used JR",
			 page_desc->partition);
		return CAAM_FAILURE;
	}

	ret = caam_hal_sm_deallocate_pages(sm_privdata.jr_addr, page_desc);
	if (ret) {
		SM_TRACE("De-alloc pages %u to %u error 0x%" PRIx32,
			 page_desc->page,
			 page_desc->page + page_desc->page_count - 1, ret);

		return ret;
	}

	ret = caam_hal_sm_deallocate_partition(sm_privdata.jr_addr,
					       page_desc->partition);
	if (ret) {
		SM_TRACE("De-alloc partition %u error 0x%" PRIx32,
			 page_desc->partition, ret);
		return ret;
	}

	return CAAM_NO_ERROR;
}

enum caam_status
caam_sm_set_access_perm(const struct caam_sm_page_desc *page_desc,
			unsigned int grp1_perm, unsigned int grp2_perm)
{
	uint32_t grp1 = UINT32_MAX;
	uint32_t grp2 = UINT32_MAX;

	if (!page_desc)
		return CAAM_BAD_PARAM;

	/* Check if the partition is owned by the current Job Ring */
	if (!caam_hal_sm_prtn_is_owned(sm_privdata.jr_addr,
				       page_desc->partition)) {
		SM_TRACE("Partition %u not owned by current JR",
			 page_desc->partition);
		return CAAM_FAILURE;
	}

	/*
	 * Grant ourselves access to Secure Memory group 1 and/or group 2
	 * when @grp1_perm and/or @grp2_perm is not 0.
	 *
	 * The access group is derived from the Job Ring owner setting,
	 * without the Secure bit which is already managed by the Job Ring.
	 */
	if (grp1_perm)
		grp1 = JROWN_ARM_NS;

	if (grp2_perm)
		grp2 = JROWN_ARM_NS;

	caam_hal_sm_set_access_group(sm_privdata.jr_addr, page_desc->partition,
				     grp1, grp2);
	caam_hal_sm_set_access_perm(sm_privdata.jr_addr, page_desc->partition,
				    grp1_perm, grp2_perm);

	return CAAM_NO_ERROR;
}

enum caam_status caam_sm_init(struct caam_jrcfg *jrcfg)
{
	if (!jrcfg)
		return CAAM_FAILURE;

	sm_privdata.ctrl_addr = jrcfg->base;
	sm_privdata.jr_addr = jrcfg->base + jrcfg->offset;
	sm_privdata.jr_offset = jrcfg->offset;
	sm_privdata.baseaddr = caam_hal_sm_get_base();

	if (!sm_privdata.baseaddr)
		return CAAM_FAILURE;

	SM_TRACE("Secure Memory Base address = 0x%" PRIxVA,
		 sm_privdata.baseaddr);
	SM_TRACE("CAAM controller address = 0x%" PRIxVA, sm_privdata.ctrl_addr);
	SM_TRACE("CAAM Job Ring address = 0x%" PRIxVA, sm_privdata.jr_addr);

	return CAAM_NO_ERROR;
}
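
/*
 * Usage sketch (illustrative only, not part of the driver): a possible
 * caller sequence once caam_sm_init() has been run from the CAAM driver
 * initialization path. The partition/page numbers and the group 1
 * permission value below are arbitrary placeholders; real callers use the
 * permission bit definitions provided by the Secure Memory HAL.
 *
 *	struct caam_sm_page_desc desc = {
 *		.partition = 1,
 *		.page = 0,
 *		.page_count = 2,
 *	};
 *	struct caam_sm_page_addr addr = { };
 *
 *	if (caam_sm_alloc(&desc, &addr) == CAAM_NO_ERROR) {
 *		// Example: grant group 1 placeholder permissions, none to group 2
 *		caam_sm_set_access_perm(&desc, 0x3, 0);
 *
 *		// ... stage data through addr.vaddr (addr.paddr for CAAM) ...
 *
 *		caam_sm_free(&desc);
 *	}
 */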