/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <io_block.h>
#include <io_driver.h>
#include <io_storage.h>
#include <platform_def.h>
#include <string.h>
#include <utils.h>

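/*
 * Per-device state tracked by this driver: dev_spec points at the
 * platform-provided io_block_dev_spec_t (ops, transfer buffer and block
 * size), base/size describe the region selected by block_open(), and
 * file_pos is the current seek position relative to base.
 */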
typedef struct {
	io_block_dev_spec_t	*dev_spec;
	uintptr_t		base;
	size_t			file_pos;
	size_t			size;
} block_dev_state_t;

#define is_power_of_2(x)	(((x) != 0) && (((x) & ((x) - 1)) == 0))

io_type_t device_type_block(void);

static int block_open(io_dev_info_t *dev_info, const uintptr_t spec,
		      io_entity_t *entity);
static int block_seek(io_entity_t *entity, int mode, ssize_t offset);
static int block_read(io_entity_t *entity, uintptr_t buffer, size_t length,
		      size_t *length_read);
static int block_write(io_entity_t *entity, const uintptr_t buffer,
		       size_t length, size_t *length_written);
static int block_close(io_entity_t *entity);
static int block_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info);
static int block_dev_close(io_dev_info_t *dev_info);

static const io_dev_connector_t block_dev_connector = {
	.dev_open	= block_dev_open
};

static const io_dev_funcs_t block_dev_funcs = {
	.type		= device_type_block,
	.open		= block_open,
	.seek		= block_seek,
	.size		= NULL,
	.read		= block_read,
	.write		= block_write,
	.close		= block_close,
	.dev_init	= NULL,
	.dev_close	= block_dev_close,
};

static block_dev_state_t state_pool[MAX_IO_BLOCK_DEVICES];
static io_dev_info_t dev_info_pool[MAX_IO_BLOCK_DEVICES];

/* Track the number of block states currently allocated from the pool */
static unsigned int block_dev_count;

io_type_t device_type_block(void)
{
	return IO_TYPE_BLOCK;
}

/* Locate a block state in the pool, specified by address */
static int find_first_block_state(const io_block_dev_spec_t *dev_spec,
				  unsigned int *index_out)
{
	int result = -ENOENT;
	for (int index = 0; index < MAX_IO_BLOCK_DEVICES; ++index) {
		/* dev_spec is used as identifier since it's unique */
		if (state_pool[index].dev_spec == dev_spec) {
			result = 0;
			*index_out = index;
			break;
		}
	}
	return result;
}

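/*
 * dev_info_pool[i] and state_pool[i] are paired by index: allocation finds
 * a free slot by searching state_pool for an entry whose dev_spec is still
 * NULL (find_first_block_state(NULL, ...)) and wires the matching device
 * info to it.
 */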
/* Allocate a device info from the pool and return a pointer to it */
static int allocate_dev_info(io_dev_info_t **dev_info)
{
	int result = -ENOMEM;
	assert(dev_info != NULL);

	if (block_dev_count < MAX_IO_BLOCK_DEVICES) {
		unsigned int index = 0;
		result = find_first_block_state(NULL, &index);
		assert(result == 0);
		/* initialize dev_info */
		dev_info_pool[index].funcs = &block_dev_funcs;
		dev_info_pool[index].info = (uintptr_t)&state_pool[index];
		*dev_info = &dev_info_pool[index];
		++block_dev_count;
	}

	return result;
}


/* Release a device info to the pool */
static int free_dev_info(io_dev_info_t *dev_info)
{
	int result;
	unsigned int index = 0;
	block_dev_state_t *state;
	assert(dev_info != NULL);

	state = (block_dev_state_t *)dev_info->info;
	result = find_first_block_state(state->dev_spec, &index);
	if (result == 0) {
		/* free if device info is valid */
		zeromem(state, sizeof(block_dev_state_t));
		zeromem(dev_info, sizeof(io_dev_info_t));
		--block_dev_count;
	}

	return result;
}

static int block_open(io_dev_info_t *dev_info, const uintptr_t spec,
		      io_entity_t *entity)
{
	block_dev_state_t *cur;
	io_block_spec_t *region;

	assert((dev_info->info != (uintptr_t)NULL) &&
	       (spec != (uintptr_t)NULL) &&
	       (entity->info == (uintptr_t)NULL));

	region = (io_block_spec_t *)spec;
	cur = (block_dev_state_t *)dev_info->info;
	assert(((region->offset % cur->dev_spec->block_size) == 0) &&
	       ((region->length % cur->dev_spec->block_size) == 0));

	cur->base = region->offset;
	cur->size = region->length;
	cur->file_pos = 0;

	entity->info = (uintptr_t)cur;
	return 0;
}

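/* Only IO_SEEK_SET and IO_SEEK_CUR are supported; other modes return -EINVAL */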
/* The offset parameter is relative to the base of the current region */
static int block_seek(io_entity_t *entity, int mode, ssize_t offset)
{
	block_dev_state_t *cur;

	assert(entity->info != (uintptr_t)NULL);

	cur = (block_dev_state_t *)entity->info;
	assert((offset >= 0) && (offset < cur->size));

	switch (mode) {
	case IO_SEEK_SET:
		cur->file_pos = offset;
		break;
	case IO_SEEK_CUR:
		cur->file_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	assert(cur->file_pos < cur->size);
	return 0;
}

static int block_read(io_entity_t *entity, uintptr_t buffer, size_t length,
		      size_t *length_read)
{
	block_dev_state_t *cur;
	io_block_spec_t *buf;
	io_block_ops_t *ops;
	size_t aligned_length, skip, count, left, padding, block_size;
	int lba;
	int buffer_not_aligned;

	assert(entity->info != (uintptr_t)NULL);
	cur = (block_dev_state_t *)entity->info;
	ops = &(cur->dev_spec->ops);
	buf = &(cur->dev_spec->buffer);
	block_size = cur->dev_spec->block_size;
	assert((length <= cur->size) &&
	       (length > 0) &&
	       (ops->read != 0));

	if ((buffer & (block_size - 1)) != 0) {
		/*
		 * The caller's buffer is not aligned to the block size.
		 * Block devices rely on DMA, so the transfer has to be
		 * bounced through the block-aligned device buffer instead.
		 */
		buffer_not_aligned = 1;
	} else {
		buffer_not_aligned = 0;
	}

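	/*
	 * The transfer is widened to whole blocks: 'skip' is the number of
	 * bytes to discard at the start of the first block (file_pos is not
	 * necessarily block aligned), 'aligned_length' is skip + length
	 * rounded up to a multiple of the block size, and 'padding' is the
	 * extra tail read beyond the requested data. 'left' tracks how much
	 * of the aligned transfer is still outstanding.
	 */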
	skip = cur->file_pos % block_size;
	aligned_length = ((skip + length) + (block_size - 1)) &
			 ~(block_size - 1);
	padding = aligned_length - (skip + length);
	left = aligned_length;
	do {
		lba = (cur->file_pos + cur->base) / block_size;
		if (left >= buf->length) {
			/*
			 * 'left' still covers at least one full device
			 * buffer, so no tail padding is involved yet.
			 *
			 * If the caller's buffer is not aligned, the
			 * block-aligned device buffer must be used instead.
			 */
			if (skip || buffer_not_aligned) {
				/*
				 * Either the start position (file_pos) is not
				 * block aligned or the caller's buffer is not
				 * DMA-safe, so read into the device buffer
				 * first.
				 */
				count = ops->read(lba, buf->offset,
						  buf->length);
			} else {
				count = ops->read(lba, buffer, buf->length);
			}
			assert(count == buf->length);
			cur->file_pos += count - skip;
			if (skip || buffer_not_aligned) {
				/*
				 * The data was staged in the device buffer;
				 * copy the useful part (dropping the skipped
				 * head) into the caller's buffer.
				 */
				memcpy((void *)buffer,
				       (void *)(buf->offset + skip),
				       count - skip);
			}
			left = left - (count - skip);
		} else {
			if (skip || padding || buffer_not_aligned) {
				/*
				 * The start position is not block aligned,
				 * the tail is padded, or the caller's buffer
				 * is not aligned: read the remaining blocks
				 * into the device buffer to avoid overflowing
				 * the caller's buffer or tripping a DMA
				 * alignment error.
				 */
				count = ops->read(lba, buf->offset, left);
			} else
				count = ops->read(lba, buffer, left);
			assert(count == left);
			left = left - (skip + padding);
			cur->file_pos += left;
			if (skip || padding || buffer_not_aligned) {
				/*
				 * Copy the useful part of the staged data,
				 * dropping the skipped head and padded tail.
				 */
				memcpy((void *)buffer,
				       (void *)(buf->offset + skip),
				       left);
			}
			/* This was the last block operation */
			left = 0;
		}
		skip = cur->file_pos % block_size;
	} while (left > 0);
	*length_read = length;

	return 0;
}

static int block_write(io_entity_t *entity, const uintptr_t buffer,
		       size_t length, size_t *length_written)
{
	block_dev_state_t *cur;
	io_block_spec_t *buf;
	io_block_ops_t *ops;
	size_t aligned_length, skip, count, left, padding, block_size;
	int lba;
	int buffer_not_aligned;

	assert(entity->info != (uintptr_t)NULL);
	cur = (block_dev_state_t *)entity->info;
	ops = &(cur->dev_spec->ops);
	buf = &(cur->dev_spec->buffer);
	block_size = cur->dev_spec->block_size;
	assert((length <= cur->size) &&
	       (length > 0) &&
	       (ops->read != 0) &&
	       (ops->write != 0));

	if ((buffer & (block_size - 1)) != 0) {
		/*
		 * The caller's buffer is not aligned to the block size.
		 * Block devices rely on DMA, so the transfer has to be
		 * bounced through the block-aligned device buffer instead.
		 */
		buffer_not_aligned = 1;
	} else {
		buffer_not_aligned = 0;
	}

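	/*
	 * Writes that do not start or end on a block boundary (or that come
	 * from an unaligned caller buffer) are handled as read-modify-write:
	 * the affected blocks are read into the device buffer, the caller's
	 * data is merged in, and the whole blocks are written back.
	 */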
	skip = cur->file_pos % block_size;
	aligned_length = ((skip + length) + (block_size - 1)) &
			 ~(block_size - 1);
	padding = aligned_length - (skip + length);
	left = aligned_length;
	do {
		lba = (cur->file_pos + cur->base) / block_size;
		if (left >= buf->length) {
			/*
			 * 'left' still covers at least one full device
			 * buffer, so no tail padding is involved yet.
			 */
			if (skip || buffer_not_aligned) {
				/*
				 * Either the start position (file_pos) is not
				 * block aligned or the caller's buffer is not
				 * aligned: read the block, merge the caller's
				 * data into the device buffer and write the
				 * whole block back.
				 */
				count = ops->read(lba, buf->offset,
						  buf->length);
				assert(count == buf->length);
				memcpy((void *)(buf->offset + skip),
				       (void *)buffer,
				       count - skip);
				count = ops->write(lba, buf->offset,
						   buf->length);
			} else
				count = ops->write(lba, buffer, buf->length);
			assert(count == buf->length);
			cur->file_pos += count - skip;
			left = left - (count - skip);
		} else {
			if (skip || padding || buffer_not_aligned) {
				/*
				 * The start position is not block aligned, so
				 * the existing data at the head of the block
				 * must not be clobbered: read it first and
				 * merge the caller's data in. The same applies
				 * to the padded tail, and an unaligned caller
				 * buffer must go through the device buffer to
				 * avoid a DMA alignment error.
				 */
				count = ops->read(lba, buf->offset, left);
				assert(count == left);
				memcpy((void *)(buf->offset + skip),
				       (void *)buffer,
				       left - skip - padding);
				count = ops->write(lba, buf->offset, left);
			} else
				count = ops->write(lba, buffer, left);
			assert(count == left);
			cur->file_pos += left - (skip + padding);
			/* This was the last block operation */
			left = 0;
		}
		skip = cur->file_pos % block_size;
	} while (left > 0);
	*length_written = length;
	return 0;
}

static int block_close(io_entity_t *entity)
{
	entity->info = (uintptr_t)NULL;
	return 0;
}

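/*
 * Connect a block device: bind a platform io_block_dev_spec_t to a free
 * state/device-info pair and sanity-check it (the block size must be a power
 * of two and the transfer buffer must be block aligned and a multiple of the
 * block size in length).
 */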
static int block_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info)
{
	block_dev_state_t *cur;
	io_block_spec_t *buffer;
	io_dev_info_t *info;
	size_t block_size;
	int result;

	assert(dev_info != NULL);
	result = allocate_dev_info(&info);
	if (result)
		return -ENOENT;

	cur = (block_dev_state_t *)info->info;
	/* dev_spec is of type io_block_dev_spec_t */
	cur->dev_spec = (io_block_dev_spec_t *)dev_spec;
	buffer = &(cur->dev_spec->buffer);
	block_size = cur->dev_spec->block_size;
	assert((block_size > 0) &&
	       (is_power_of_2(block_size) != 0) &&
	       ((buffer->offset % block_size) == 0) &&
	       ((buffer->length % block_size) == 0));

	*dev_info = info;
	/* Silence unused-variable warnings when asserts are compiled out */
	(void)block_size;
	(void)buffer;
	return 0;
}

static int block_dev_close(io_dev_info_t *dev_info)
{
	return free_dev_info(dev_info);
}

/* Exported functions */

/* Register the Block driver with the IO abstraction */
int register_io_dev_block(const io_dev_connector_t **dev_con)
{
	int result;

	assert(dev_con != NULL);

	/*
	 * io_register_device() does not actually make use of the device
	 * info it is passed, so always register with the first entry of
	 * the pool here.
	 */
	result = io_register_device(&dev_info_pool[0]);
	if (result == 0)
		*dev_con = &block_dev_connector;
	return result;
}