xref: /rk3399_ARM-atf/drivers/io/io_block.c (revision 6331a31a66cdcf53421d3dccd3067f072c6da175)
/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <io_block.h>
#include <io_driver.h>
#include <io_storage.h>
#include <platform_def.h>
#include <string.h>

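/*
 * Per-device state, bound to one io_block_dev_spec_t:
 *  - dev_spec:  platform-supplied spec (block size, transfer buffer and
 *               low-level read/write ops); also serves as the unique
 *               identifier of the pool entry
 *  - base:      byte offset on the device of the currently opened region
 *  - file_pos:  current position in bytes, relative to base
 *  - size:      length in bytes of the currently opened region
 */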
typedef struct {
	io_block_dev_spec_t	*dev_spec;
	uintptr_t		base;
	size_t			file_pos;
	size_t			size;
} block_dev_state_t;

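/*
 * A power of two has exactly one bit set, so clearing its lowest set bit
 * with (x & (x - 1)) yields zero: 512 (0x200) & 511 (0x1FF) == 0, while a
 * non-power such as 768 gives 768 & 767 == 512. The (x != 0) term rules
 * out zero.
 */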
#define is_power_of_2(x)	(((x) != 0) && (((x) & ((x) - 1)) == 0))

io_type_t device_type_block(void);

static int block_open(io_dev_info_t *dev_info, const uintptr_t spec,
		      io_entity_t *entity);
static int block_seek(io_entity_t *entity, int mode, ssize_t offset);
static int block_read(io_entity_t *entity, uintptr_t buffer, size_t length,
		      size_t *length_read);
static int block_write(io_entity_t *entity, const uintptr_t buffer,
		       size_t length, size_t *length_written);
static int block_close(io_entity_t *entity);
static int block_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info);
static int block_dev_close(io_dev_info_t *dev_info);

static const io_dev_connector_t block_dev_connector = {
	.dev_open	= block_dev_open
};

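/*
 * Function table handed to the generic io_storage layer. This driver does
 * not implement size() or dev_init(), so those entries stay NULL.
 */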
static const io_dev_funcs_t block_dev_funcs = {
	.type		= device_type_block,
	.open		= block_open,
	.seek		= block_seek,
	.size		= NULL,
	.read		= block_read,
	.write		= block_write,
	.close		= block_close,
	.dev_init	= NULL,
	.dev_close	= block_dev_close,
};

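/*
 * Both pools are sized by the platform-defined MAX_IO_BLOCK_DEVICES; one
 * entry of each pool is consumed per block_dev_open() call.
 */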
static block_dev_state_t state_pool[MAX_IO_BLOCK_DEVICES];
static io_dev_info_t dev_info_pool[MAX_IO_BLOCK_DEVICES];

/* Track the number of allocated block device states */
static unsigned int block_dev_count;

io_type_t device_type_block(void)
{
	return IO_TYPE_BLOCK;
}

/* Locate a block state in the pool, identified by its dev_spec pointer */
static int find_first_block_state(const io_block_dev_spec_t *dev_spec,
				  unsigned int *index_out)
{
	int result = -ENOENT;
	for (int index = 0; index < MAX_IO_BLOCK_DEVICES; ++index) {
		/* dev_spec is used as the identifier since it is unique */
		if (state_pool[index].dev_spec == dev_spec) {
			result = 0;
			*index_out = index;
			break;
		}
	}
	return result;
}

/* Allocate a device info from the pool and return a pointer to it */
static int allocate_dev_info(io_dev_info_t **dev_info)
{
	int result = -ENOMEM;
	assert(dev_info != NULL);

	if (block_dev_count < MAX_IO_BLOCK_DEVICES) {
		unsigned int index = 0;
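		/*
		 * A free slot is one whose dev_spec is still NULL: the state
		 * pool is a zero-initialized static array and free_dev_info()
		 * clears entries on release, so searching for a NULL dev_spec
		 * finds an unused entry.
		 */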
		result = find_first_block_state(NULL, &index);
		assert(result == 0);
		/* initialize dev_info */
		dev_info_pool[index].funcs = &block_dev_funcs;
		dev_info_pool[index].info = (uintptr_t)&state_pool[index];
		*dev_info = &dev_info_pool[index];
		++block_dev_count;
	}

	return result;
}


/* Release a device info to the pool */
static int free_dev_info(io_dev_info_t *dev_info)
{
	int result;
	unsigned int index = 0;
	block_dev_state_t *state;
	assert(dev_info != NULL);

	state = (block_dev_state_t *)dev_info->info;
	result = find_first_block_state(state->dev_spec, &index);
	if (result == 0) {
		/* Free the state only if it refers to a valid pool entry */
		memset(state, 0, sizeof(block_dev_state_t));
		memset(dev_info, 0, sizeof(io_dev_info_t));
		--block_dev_count;
	}

	return result;
}

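/*
 * Open a region of the block device. The spec is an io_block_spec_t whose
 * offset and length are byte values and must both be multiples of the
 * device block size.
 */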
static int block_open(io_dev_info_t *dev_info, const uintptr_t spec,
		      io_entity_t *entity)
{
	block_dev_state_t *cur;
	io_block_spec_t *region;

	assert((dev_info->info != (uintptr_t)NULL) &&
	       (spec != (uintptr_t)NULL) &&
	       (entity->info == (uintptr_t)NULL));

	region = (io_block_spec_t *)spec;
	cur = (block_dev_state_t *)dev_info->info;
	assert(((region->offset % cur->dev_spec->block_size) == 0) &&
	       ((region->length % cur->dev_spec->block_size) == 0));

	cur->base = region->offset;
	cur->size = region->length;
	cur->file_pos = 0;

	entity->info = (uintptr_t)cur;
	return 0;
}

/* The offset parameter is a relative address within the opened region */
static int block_seek(io_entity_t *entity, int mode, ssize_t offset)
{
	block_dev_state_t *cur;

	assert(entity->info != (uintptr_t)NULL);

	cur = (block_dev_state_t *)entity->info;
	assert((offset >= 0) && (offset < cur->size));

	switch (mode) {
	case IO_SEEK_SET:
		cur->file_pos = offset;
		break;
	case IO_SEEK_CUR:
		cur->file_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	assert(cur->file_pos < cur->size);
	return 0;
}

static int block_read(io_entity_t *entity, uintptr_t buffer, size_t length,
		      size_t *length_read)
{
	block_dev_state_t *cur;
	io_block_spec_t *buf;
	io_block_ops_t *ops;
	size_t aligned_length, skip, count, left, padding, block_size;
	int lba;

	assert(entity->info != (uintptr_t)NULL);
	cur = (block_dev_state_t *)entity->info;
	ops = &(cur->dev_spec->ops);
	buf = &(cur->dev_spec->buffer);
	block_size = cur->dev_spec->block_size;
	assert((length <= cur->size) &&
	       (length > 0) &&
	       (ops->read != 0));

	skip = cur->file_pos % block_size;
	aligned_length = ((skip + length) + (block_size - 1)) &
			 ~(block_size - 1);
	padding = aligned_length - (skip + length);
	left = aligned_length;
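	/*
	 * Worked example with block_size = 512, file_pos = 100 and
	 * length = 1000: skip = 100, aligned_length = (1100 + 511) & ~511
	 * = 1536 and padding = 1536 - 1100 = 436. The loop below moves
	 * full-buffer chunks directly (or through the block buffer when
	 * skip != 0) and uses the block buffer for the final chunk whenever
	 * skip or padding would make a direct transfer spill outside the
	 * caller's buffer.
	 */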
	do {
		lba = (cur->file_pos + cur->base) / block_size;
		if (left >= buf->length) {
			/*
			 * left still covers at least one full buffer, so
			 * there is no padding to deal with here.
			 */
			if (skip) {
				/*
				 * The start address (file_pos) isn't aligned
				 * to the block size, so read through the
				 * block buffer: the block device relies on
				 * DMA and needs a block-aligned transfer.
				 */
				count = ops->read(lba, buf->offset,
						  buf->length);
			} else {
				count = ops->read(lba, buffer, buf->length);
			}
			assert(count == buf->length);
			cur->file_pos += count - skip;
			if (skip) {
				/*
				 * The access isn't block-aligned, so the data
				 * was read into the block buffer; copy the
				 * requested part out to the caller.
				 */
				memcpy((void *)buffer,
				       (void *)(buf->offset + skip),
				       count - skip);
			}
			left = left - (count - skip);
		} else {
			if (skip || padding) {
				/*
				 * Either the start address (file_pos) or the
				 * length isn't aligned to the block size, so
				 * read full blocks into the block buffer to
				 * avoid overflowing the caller's buffer.
				 */
				count = ops->read(lba, buf->offset, left);
			} else
				count = ops->read(lba, buffer, left);
			assert(count == left);
			left = left - (skip + padding);
			cur->file_pos += left;
			if (skip || padding) {
				/*
				 * The access isn't block-aligned, so the data
				 * was read into the block buffer; copy the
				 * requested part out to the caller.
				 */
				memcpy((void *)buffer,
				       (void *)(buf->offset + skip),
				       left);
			}
			/* This is the last block operation */
			left = 0;
		}
		skip = cur->file_pos % block_size;
	} while (left > 0);
	*length_read = length;

	return 0;
}

static int block_write(io_entity_t *entity, const uintptr_t buffer,
		       size_t length, size_t *length_written)
{
	block_dev_state_t *cur;
	io_block_spec_t *buf;
	io_block_ops_t *ops;
	size_t aligned_length, skip, count, left, padding, block_size;
	int lba;

	assert(entity->info != (uintptr_t)NULL);
	cur = (block_dev_state_t *)entity->info;
	ops = &(cur->dev_spec->ops);
	buf = &(cur->dev_spec->buffer);
	block_size = cur->dev_spec->block_size;
	assert((length <= cur->size) &&
	       (length > 0) &&
	       (ops->read != 0) &&
	       (ops->write != 0));

	skip = cur->file_pos % block_size;
	aligned_length = ((skip + length) + (block_size - 1)) &
			 ~(block_size - 1);
	padding = aligned_length - (skip + length);
	left = aligned_length;
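	/*
	 * skip, aligned_length and padding are computed as in block_read().
	 * A partial block at either end must not clobber data already on
	 * the device, so such blocks are handled as read-modify-write:
	 * read the full blocks, merge the new bytes into the block buffer,
	 * then write the blocks back.
	 */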
	do {
		lba = (cur->file_pos + cur->base) / block_size;
		if (left >= buf->length) {
			/*
			 * left still covers at least one full buffer, so
			 * there is no padding to deal with here.
			 */
			if (skip) {
				/*
				 * The start address (file_pos) isn't aligned
				 * to the block size, so go through the block
				 * buffer: read the block, merge the new data
				 * in, then write it back. The block device
				 * relies on DMA and needs a block-aligned
				 * transfer.
				 */
				count = ops->read(lba, buf->offset,
						  buf->length);
				assert(count == buf->length);
				memcpy((void *)(buf->offset + skip),
				       (void *)buffer,
				       count - skip);
				count = ops->write(lba, buf->offset,
						   buf->length);
			} else
				count = ops->write(lba, buffer, buf->length);
			assert(count == buf->length);
			cur->file_pos += count - skip;
			left = left - (count - skip);
		} else {
			if (skip || padding) {
				/*
				 * The start address (file_pos) or the length
				 * isn't aligned to the block size, and the
				 * partial blocks must not corrupt the data
				 * already on the device. Read the full
				 * blocks, merge the new data into the block
				 * buffer, then write the blocks back.
				 */
				count = ops->read(lba, buf->offset, left);
				assert(count == left);
				memcpy((void *)(buf->offset + skip),
				       (void *)buffer,
				       left - skip - padding);
				count = ops->write(lba, buf->offset, left);
			} else
				count = ops->write(lba, buffer, left);
			assert(count == left);
			cur->file_pos += left - (skip + padding);
			/* This is the last block operation */
			left = 0;
		}
		skip = cur->file_pos % block_size;
	} while (left > 0);
	*length_written = length;
	return 0;
}

static int block_close(io_entity_t *entity)
{
	entity->info = (uintptr_t)NULL;
	return 0;
}

static int block_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info)
{
	block_dev_state_t *cur;
	io_block_spec_t *buffer;
	io_dev_info_t *info;
	size_t block_size;
	int result;

	assert(dev_info != NULL);
	result = allocate_dev_info(&info);
	if (result)
		return -ENOENT;

	cur = (block_dev_state_t *)info->info;
	/* dev_spec is of type io_block_dev_spec_t. */
	cur->dev_spec = (io_block_dev_spec_t *)dev_spec;
	buffer = &(cur->dev_spec->buffer);
	block_size = cur->dev_spec->block_size;
	assert((block_size > 0) &&
	       (is_power_of_2(block_size) != 0) &&
	       ((buffer->offset % block_size) == 0) &&
	       ((buffer->length % block_size) == 0));

	*dev_info = info;
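	/*
	 * buffer and block_size are only referenced by the assert() above;
	 * the casts below keep the compiler quiet about otherwise unused
	 * variables when assertions are compiled out.
	 */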
	(void)block_size;
	(void)buffer;
	return 0;
}

static int block_dev_close(io_dev_info_t *dev_info)
{
	return free_dev_info(dev_info);
}

/* Exported functions */

/* Register the Block driver with the IO abstraction */
int register_io_dev_block(const io_dev_connector_t **dev_con)
{
	int result;

	assert(dev_con != NULL);

	/*
	 * io_register_device() doesn't actually use the dev_info it is
	 * given, so always register with the first entry of the pool here.
	 */
	result = io_register_device(&dev_info_pool[0]);
	if (result == 0)
		*dev_con = &block_dev_connector;
	return result;
}

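/*
 * Illustrative sketch (not part of the driver) of how a platform might use
 * this driver; the ops implementations, names, addresses and sizes below
 * are hypothetical placeholders, and error checking is omitted. Only the
 * io_* calls and structure fields come from the io_storage/io_block API:
 *
 *	static io_block_dev_spec_t emmc_dev_spec = {
 *		.buffer		= { .offset = 0x100000, .length = 0x1000 },
 *		.ops		= { .read  = emmc_read_blocks,
 *				    .write = emmc_write_blocks },
 *		.block_size	= 512,
 *	};
 *	static io_block_spec_t image_spec = {
 *		.offset	= 0x20000,	(byte offset, block aligned)
 *		.length	= 0x40000,	(byte length, block aligned)
 *	};
 *
 *	const io_dev_connector_t *con;
 *	uintptr_t dev_handle, image_handle;
 *	size_t bytes_read;
 *
 *	register_io_dev_block(&con);
 *	io_dev_open(con, (uintptr_t)&emmc_dev_spec, &dev_handle);
 *	io_open(dev_handle, (uintptr_t)&image_spec, &image_handle);
 *	io_read(image_handle, destination_buffer, image_size, &bytes_read);
 *	io_close(image_handle);
 *	io_dev_close(dev_handle);
 */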