/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <io_block.h>
#include <io_driver.h>
#include <io_storage.h>
#include <platform_def.h>
#include <string.h>

typedef struct {
	io_block_dev_spec_t	*dev_spec;
	uintptr_t		base;
	size_t			file_pos;
	size_t			size;
} block_dev_state_t;

#define is_power_of_2(x)	(((x) != 0) && (((x) & ((x) - 1)) == 0))

io_type_t device_type_block(void);

static int block_open(io_dev_info_t *dev_info, const uintptr_t spec,
		      io_entity_t *entity);
static int block_seek(io_entity_t *entity, int mode, ssize_t offset);
static int block_read(io_entity_t *entity, uintptr_t buffer, size_t length,
		      size_t *length_read);
static int block_write(io_entity_t *entity, const uintptr_t buffer,
		       size_t length, size_t *length_written);
static int block_close(io_entity_t *entity);
static int block_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info);
static int block_dev_close(io_dev_info_t *dev_info);

static const io_dev_connector_t block_dev_connector = {
	.dev_open	= block_dev_open
};

static const io_dev_funcs_t block_dev_funcs = {
	.type		= device_type_block,
	.open		= block_open,
	.seek		= block_seek,
	.size		= NULL,
	.read		= block_read,
	.write		= block_write,
	.close		= block_close,
	.dev_init	= NULL,
	.dev_close	= block_dev_close,
};

static block_dev_state_t state_pool[MAX_IO_BLOCK_DEVICES];
static io_dev_info_t dev_info_pool[MAX_IO_BLOCK_DEVICES];

/* Track the number of allocated block device states */
static unsigned int block_dev_count;

io_type_t device_type_block(void)
{
	return IO_TYPE_BLOCK;
}

/* Locate a block state in the pool, identified by its dev_spec address */
static int find_first_block_state(const io_block_dev_spec_t *dev_spec,
				  unsigned int *index_out)
{
	int result = -ENOENT;
	for (int index = 0; index < MAX_IO_BLOCK_DEVICES; ++index) {
		/* dev_spec is used as the identifier since it is unique */
		if (state_pool[index].dev_spec == dev_spec) {
			result = 0;
			*index_out = index;
			break;
		}
	}
	return result;
}

/* Allocate a device info from the pool and return a pointer to it */
static int allocate_dev_info(io_dev_info_t **dev_info)
{
	int result = -ENOMEM;
	assert(dev_info != NULL);

	if (block_dev_count < MAX_IO_BLOCK_DEVICES) {
		unsigned int index = 0;
		result = find_first_block_state(NULL, &index);
		assert(result == 0);
		/* initialize dev_info */
		dev_info_pool[index].funcs = &block_dev_funcs;
		dev_info_pool[index].info = (uintptr_t)&state_pool[index];
		*dev_info = &dev_info_pool[index];
		++block_dev_count;
	}

	return result;
}


/* Release a device info to the pool */
static int free_dev_info(io_dev_info_t *dev_info)
{
	int result;
	unsigned int index = 0;
	block_dev_state_t *state;
	assert(dev_info != NULL);

	state = (block_dev_state_t *)dev_info->info;
	result = find_first_block_state(state->dev_spec, &index);
	if (result == 0) {
		/* free if the device info is valid */
		memset(state, 0, sizeof(block_dev_state_t));
		memset(dev_info, 0, sizeof(io_dev_info_t));
		--block_dev_count;
	}

	return result;
}

static int block_open(io_dev_info_t *dev_info, const uintptr_t spec,
		      io_entity_t *entity)
{
	block_dev_state_t *cur;
	io_block_spec_t *region;

	assert((dev_info->info != (uintptr_t)NULL) &&
	       (spec != (uintptr_t)NULL) &&
	       (entity->info == (uintptr_t)NULL));

	region = (io_block_spec_t *)spec;
	cur = (block_dev_state_t *)dev_info->info;
	assert(((region->offset % cur->dev_spec->block_size) == 0) &&
	       ((region->length % cur->dev_spec->block_size) == 0));

	cur->base = region->offset;
	cur->size = region->length;
	cur->file_pos = 0;

	entity->info = (uintptr_t)cur;
	return 0;
}

/* The offset parameter is relative to the start of the opened region */
static int block_seek(io_entity_t *entity, int mode, ssize_t offset)
{
	block_dev_state_t *cur;

	assert(entity->info != (uintptr_t)NULL);

	cur = (block_dev_state_t *)entity->info;
	assert((offset >= 0) && (offset < cur->size));

	switch (mode) {
	case IO_SEEK_SET:
		cur->file_pos = offset;
		break;
	case IO_SEEK_CUR:
		cur->file_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	assert(cur->file_pos < cur->size);
	return 0;
}

static int block_read(io_entity_t *entity, uintptr_t buffer, size_t length,
		      size_t *length_read)
{
	block_dev_state_t *cur;
	io_block_spec_t *buf;
	io_block_ops_t *ops;
	size_t aligned_length, skip, count, left, padding, block_size;
	int lba;
	int buffer_not_aligned;

	assert(entity->info != (uintptr_t)NULL);
	cur = (block_dev_state_t *)entity->info;
	ops = &(cur->dev_spec->ops);
	buf = &(cur->dev_spec->buffer);
	block_size = cur->dev_spec->block_size;
	assert((length <= cur->size) &&
	       (length > 0) &&
	       (ops->read != 0));

	if ((buffer & (block_size - 1)) != 0) {
		/*
		 * The caller's buffer isn't aligned to the block size.
		 * Block devices always rely on DMA, so it's better to
		 * pass a block-size-aligned buffer.
		 */
		buffer_not_aligned = 1;
	} else {
		buffer_not_aligned = 0;
	}

	skip = cur->file_pos % block_size;
	aligned_length = ((skip + length) + (block_size - 1)) &
			 ~(block_size - 1);
	padding = aligned_length - (skip + length);
	left = aligned_length;
	do {
		lba = (cur->file_pos + cur->base) / block_size;
		if (left >= buf->length) {
			/*
			 * left covers at least one full block buffer, so
			 * no padding is involved on this pass.
			 *
			 * If the caller's buffer isn't aligned, the
			 * aligned block buffer must be used instead.
			 */
			if (skip || buffer_not_aligned) {
				/*
				 * The current position (file_pos) isn't
				 * aligned to the block size, so read into
				 * the block buffer instead; the block
				 * device always relies on DMA.
				 */
				count = ops->read(lba, buf->offset,
						  buf->length);
			} else {
				count = ops->read(lba, buffer, buf->length);
			}
			assert(count == buf->length);
			cur->file_pos += count - skip;
			if (skip || buffer_not_aligned) {
				/*
				 * The data landed in the block buffer
				 * (because of the skip or the unaligned
				 * caller buffer), so copy it out now.
				 */
				memcpy((void *)buffer,
				       (void *)(buf->offset + skip),
				       count - skip);
			}
			left = left - (count - skip);
		} else {
			if (skip || padding || buffer_not_aligned) {
				/*
				 * The current position (file_pos) isn't
				 * aligned to the block size, so a full
				 * block has to be read into the block
				 * buffer. The size isn't block aligned
				 * either, so the block buffer also avoids
				 * overflowing the caller's buffer.
				 *
				 * If the caller's buffer isn't aligned,
				 * use the block buffer to avoid DMA errors.
				 */
				count = ops->read(lba, buf->offset, left);
			} else
				count = ops->read(lba, buffer, left);
			assert(count == left);
			left = left - (skip + padding);
			cur->file_pos += left;
			if (skip || padding || buffer_not_aligned) {
				/*
				 * The data landed in the block buffer, so
				 * copy the useful part out now.
				 */
				memcpy((void *)buffer,
				       (void *)(buf->offset + skip),
				       left);
			}
			/* This is already the last block operation */
			left = 0;
		}
		skip = cur->file_pos % block_size;
	} while (left > 0);
	*length_read = length;

	return 0;
}

static int block_write(io_entity_t *entity, const uintptr_t buffer,
		       size_t length, size_t *length_written)
{
	block_dev_state_t *cur;
	io_block_spec_t *buf;
	io_block_ops_t *ops;
	size_t aligned_length, skip, count, left, padding, block_size;
	int lba;
	int buffer_not_aligned;

	assert(entity->info != (uintptr_t)NULL);
	cur = (block_dev_state_t *)entity->info;
	ops = &(cur->dev_spec->ops);
	buf = &(cur->dev_spec->buffer);
	block_size = cur->dev_spec->block_size;
	assert((length <= cur->size) &&
	       (length > 0) &&
	       (ops->read != 0) &&
	       (ops->write != 0));

	if ((buffer & (block_size - 1)) != 0) {
		/*
		 * The caller's buffer isn't aligned to the block size.
		 * Block devices always rely on DMA, so it's better to
		 * pass a block-size-aligned buffer.
		 */
		buffer_not_aligned = 1;
	} else {
		buffer_not_aligned = 0;
	}

	skip = cur->file_pos % block_size;
	aligned_length = ((skip + length) + (block_size - 1)) &
			 ~(block_size - 1);
	padding = aligned_length - (skip + length);
	left = aligned_length;
	do {
		lba = (cur->file_pos + cur->base) / block_size;
		if (left >= buf->length) {
			/*
			 * left covers at least one full block buffer, so
			 * no padding is involved on this pass.
			 */
			if (skip || buffer_not_aligned) {
				/*
				 * Either the current position (file_pos)
				 * or the caller's buffer isn't aligned to
				 * the block size, so go through the block
				 * buffer: read, merge, then write back.
				 */
				count = ops->read(lba, buf->offset,
						  buf->length);
				assert(count == buf->length);
				memcpy((void *)(buf->offset + skip),
				       (void *)buffer,
				       count - skip);
				count = ops->write(lba, buf->offset,
						   buf->length);
			} else
				count = ops->write(lba, buffer, buf->length);
			assert(count == buf->length);
			cur->file_pos += count - skip;
			left = left - (count - skip);
		} else {
			if (skip || padding || buffer_not_aligned) {
				/*
				 * The current position (file_pos) isn't
				 * aligned to the block size, so the data
				 * already at the start of the block must
				 * not be polluted; reading it back and
				 * skipping it is the only way. The size
				 * isn't block aligned either, so use the
				 * block buffer to avoid overflow.
				 *
				 * If the caller's buffer isn't aligned,
				 * use the block buffer to avoid DMA errors.
				 */
				count = ops->read(lba, buf->offset, left);
				assert(count == left);
				memcpy((void *)(buf->offset + skip),
				       (void *)buffer,
				       left - skip - padding);
				count = ops->write(lba, buf->offset, left);
			} else
				count = ops->write(lba, buffer, left);
			assert(count == left);
			cur->file_pos += left - (skip + padding);
			/* This is already the last block operation */
			left = 0;
		}
		skip = cur->file_pos % block_size;
	} while (left > 0);
	*length_written = length;
	return 0;
}

static int block_close(io_entity_t *entity)
{
	entity->info = (uintptr_t)NULL;
	return 0;
}

static int block_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info)
{
	block_dev_state_t *cur;
	io_block_spec_t *buffer;
	io_dev_info_t *info;
	size_t block_size;
	int result;

	assert(dev_info != NULL);
	result = allocate_dev_info(&info);
	if (result)
		return -ENOENT;

	cur = (block_dev_state_t *)info->info;
	/* dev_spec is a pointer to an io_block_dev_spec_t. */
	cur->dev_spec = (io_block_dev_spec_t *)dev_spec;
	buffer = &(cur->dev_spec->buffer);
	block_size = cur->dev_spec->block_size;
	assert((block_size > 0) &&
	       (is_power_of_2(block_size) != 0) &&
	       ((buffer->offset % block_size) == 0) &&
	       ((buffer->length % block_size) == 0));

	*dev_info = info;	/* cast away const */
	(void)block_size;
	(void)buffer;
	return 0;
}

static int block_dev_close(io_dev_info_t *dev_info)
{
	return free_dev_info(dev_info);
}

/* Exported functions */

/* Register the Block driver with the IO abstraction */
int register_io_dev_block(const io_dev_connector_t **dev_con)
{
	int result;

	assert(dev_con != NULL);

	/*
	 * Since dev_info isn't really used in io_register_device, always
	 * register the same device info here instead.
	 */
	result = io_register_device(&dev_info_pool[0]);
	if (result == 0)
		*dev_con = &block_dev_connector;
	return result;
}
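
/*
 * Illustrative usage sketch (not part of this driver): one way a platform
 * could describe its block device and read an image region through the IO
 * framework. The PLAT_BLOCK_BUF_* macros and the plat_block_read /
 * plat_block_write callbacks below are hypothetical placeholders; a real
 * platform supplies its own block-size-aligned, DMA-capable buffer and
 * read/write hooks matching io_block_ops_t.
 *
 *	static const io_dev_connector_t *block_dev_con;
 *
 *	static io_block_dev_spec_t block_dev_spec = {
 *		// Bounce buffer used for unaligned or partial-block I/O
 *		.buffer		= { .offset = PLAT_BLOCK_BUF_BASE,
 *				    .length = PLAT_BLOCK_BUF_SIZE },
 *		.ops		= { .read  = plat_block_read,
 *				    .write = plat_block_write },
 *		.block_size	= 512,
 *	};
 *
 *	// Block-size-aligned region of the device that holds the image
 *	static const io_block_spec_t image_spec = {
 *		.offset	= 0x80000,
 *		.length	= 0x20000,
 *	};
 *
 *	int plat_read_image(uintptr_t buf, size_t size, size_t *bytes_read)
 *	{
 *		uintptr_t dev_handle, image_handle;
 *		int ret;
 *
 *		ret = register_io_dev_block(&block_dev_con);
 *		if (ret != 0)
 *			return ret;
 *		ret = io_dev_open(block_dev_con, (uintptr_t)&block_dev_spec,
 *				  &dev_handle);
 *		if (ret != 0)
 *			return ret;
 *		ret = io_open(dev_handle, (uintptr_t)&image_spec,
 *			      &image_handle);
 *		if (ret != 0)
 *			return ret;
 *		ret = io_read(image_handle, buf, size, bytes_read);
 *		io_close(image_handle);
 *		return ret;
 *	}
 */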