1 /* Copyright (C) 2007-2008 The Android Open Source Project
2 **
3 ** This software is licensed under the terms of the GNU General Public
4 ** License version 2, as published by the Free Software Foundation, and
5 ** may be copied, distributed, and modified under those terms.
6 **
7 ** This program is distributed in the hope that it will be useful,
8 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
9 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10 ** GNU General Public License for more details.
11 */
12 #include "qemu_file.h"
13 #include "goldfish_nand_reg.h"
14 #include "goldfish_nand.h"
15 #include "android/utils/tempfile.h"
16 #include "qemu_debug.h"
17 #include "android/android.h"
18 
19 #ifdef TARGET_I386
20 #include "kvm.h"
21 #endif
22 
23 #define  DEBUG  1
24 #if DEBUG
25 #  define  D(...)    VERBOSE_PRINT(init,__VA_ARGS__)
26 #  define  D_ACTIVE  VERBOSE_CHECK(init)
27 #  define  T(...)    VERBOSE_PRINT(nand_limits,__VA_ARGS__)
28 #  define  T_ACTIVE  VERBOSE_CHECK(nand_limits)
29 #else
30 #  define  D(...)    ((void)0)
31 #  define  D_ACTIVE  0
32 #  define  T(...)    ((void)0)
33 #  define  T_ACTIVE  0
34 #endif
35 
36 /* lseek uses 64-bit offsets on Darwin. */
37 /* prefer lseek64 on Linux              */
38 #ifdef __APPLE__
39 #  define  llseek  lseek
40 #elif defined(__linux__)
41 #  define  llseek  lseek64
42 #endif
43 
44 #define  XLOG  xlog
45 
46 static void
47 xlog( const char*  format, ... )
48 {
49     va_list  args;
50     va_start(args, format);
51     fprintf(stderr, "NAND: ");
52     vfprintf(stderr, format, args);
53     va_end(args);
54 }
55 
56 /* Information on a single device/nand image used by the emulator
57  */
58 typedef struct {
59     char*      devname;      /* name for this device (not null-terminated, use len below) */
60     size_t     devname_len;
61     uint8_t*   data;         /* buffer for read/write actions to underlying image */
62     int        fd;
63     uint32_t   flags;
64     uint32_t   page_size;
65     uint32_t   extra_size;
66     uint32_t   erase_size;   /* size of the data buffer mentioned above */
67     uint64_t   max_size;     /* Capacity limit for the image. The actual underlying
68                               * file may be smaller. */
69 } nand_dev;
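/* Illustrative note (editorial, not in the original source): with the defaults
 * used by nand_add_dev() below (page_size = 2048, extra_size = 64,
 * erase_pages = 64), the per-device buffer holds exactly one erase unit:
 *
 *     erase_size = erase_pages * (page_size + extra_size)
 *                = 64 * (2048 + 64) = 135168 bytes
 *
 * and dev_size (max_size) is rounded up to a multiple of erase_size when the
 * device is created.
 */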
70 
71 nand_threshold    android_nand_write_threshold;
72 nand_threshold    android_nand_read_threshold;
73 
74 #ifdef CONFIG_NAND_THRESHOLD
75 
76 /* update a threshold; once the limit is reached, signal the registered process */
77 static void
78 nand_threshold_update( nand_threshold*  t, uint32_t  len )
79 {
80     if (t->counter < t->limit) {
81         uint64_t  avail = t->limit - t->counter;
82         if (avail > len)
83             avail = len;
84 
85         if (t->counter == 0) {
86             T("%s: starting threshold counting to %lld",
87               __FUNCTION__, t->limit);
88         }
89         t->counter += avail;
90         if (t->counter >= t->limit) {
91             /* threshold reached, send a signal to an external process */
92             T( "%s: sending signal %d to pid %d !",
93                __FUNCTION__, t->signal, t->pid );
94 
95             kill( t->pid, t->signal );
96         }
97     }
98     return;
99 }
100 
101 #define  NAND_UPDATE_READ_THRESHOLD(len)  \
102     nand_threshold_update( &android_nand_read_threshold, (uint32_t)(len) )
103 
104 #define  NAND_UPDATE_WRITE_THRESHOLD(len)  \
105     nand_threshold_update( &android_nand_write_threshold, (uint32_t)(len) )
106 
107 #else /* !NAND_THRESHOLD */
108 
109 #define  NAND_UPDATE_READ_THRESHOLD(len)  \
110     do {} while (0)
111 
112 #define  NAND_UPDATE_WRITE_THRESHOLD(len)  \
113     do {} while (0)
114 
115 #endif /* !NAND_THRESHOLD */
116 
117 static nand_dev *nand_devs = NULL;
118 static uint32_t nand_dev_count = 0;
119 
120 /* The controller is the single access point for all NAND images currently
121  * attached to the system.
122  */
123 typedef struct {
124     uint32_t base;
125 
126     // register state
127     uint32_t dev;            /* offset in nand_devs for the device that is
128                               * currently being accessed */
129     uint32_t addr_low;
130     uint32_t addr_high;
131     uint32_t transfer_size;
132     uint32_t data;
133     uint32_t batch_addr_low;
134     uint32_t batch_addr_high;
135     uint32_t result;
136 } nand_dev_controller_state;
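/* Illustrative sketch (editorial, not in the original source): a guest driver
 * programs one transfer through the registers decoded in nand_dev_write() and
 * nand_dev_read() below, roughly:
 *
 *     write NAND_DEV            device index
 *     write NAND_ADDR_HIGH/LOW  64-bit byte offset into the image
 *     write NAND_TRANSFER_SIZE  byte count
 *     write NAND_DATA           guest address of the data buffer
 *     write NAND_COMMAND        NAND_CMD_READ / NAND_CMD_WRITE / NAND_CMD_ERASE
 *     read  NAND_RESULT         number of bytes actually transferred
 *
 * Register names come from goldfish_nand_reg.h; the guest-side code lives in
 * the kernel driver, not in this file.
 */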
137 
138 /* update this every time you change the nand_dev_controller_state structure
139  * 1: initial version, saving only nand_dev_controller_state fields
140  * 2: saving actual disk contents as well
141  * 3: use the correct data length and truncate to avoid padding.
142  */
143 #define  NAND_DEV_STATE_SAVE_VERSION  4
144 
145 #define  QFIELD_STRUCT  nand_dev_controller_state
146 QFIELD_BEGIN(nand_dev_controller_state_fields)
147     QFIELD_INT32(dev),
148     QFIELD_INT32(addr_low),
149     QFIELD_INT32(addr_high),
150     QFIELD_INT32(transfer_size),
151     QFIELD_INT32(data),
152     QFIELD_INT32(batch_addr_low),
153     QFIELD_INT32(batch_addr_high),
154     QFIELD_INT32(result),
155 QFIELD_END
156 
157 
158 /* EINTR-proof read - due to SIGALRM in use elsewhere */
159 static int  do_read(int  fd, void*  buf, size_t  size)
160 {
161     int  ret;
162     do {
163         ret = read(fd, buf, size);
164     } while (ret < 0 && errno == EINTR);
165 
166     return ret;
167 }
168 
169 /* EINTR-proof write - due to SIGALRM in use elsewhere */
170 static int  do_write(int  fd, const void*  buf, size_t  size)
171 {
172     int  ret;
173     do {
174         ret = write(fd, buf, size);
175     } while (ret < 0 && errno == EINTR);
176 
177     return ret;
178 }
179 
180 /* EINTR-proof lseek - due to SIGALRM in use elsewhere */
181 static int  do_lseek(int  fd, off_t offset, int whence)
182 {
183     int  ret;
184     do {
185         ret = lseek(fd, offset, whence);
186     } while (ret < 0 && errno == EINTR);
187 
188     return ret;
189 }
190 
191 /* EINTR-proof ftruncate - due to SIGALRM in use elsewhere */
192 static int  do_ftruncate(int  fd, size_t  size)
193 {
194     int  ret;
195     do {
196         ret = ftruncate(fd, size);
197     } while (ret < 0 && errno == EINTR);
198 
199     return ret;
200 }
201 
202 #define NAND_DEV_SAVE_DISK_BUF_SIZE 2048
203 
204 
205 /**
206  * Copies the current contents of a disk image into the snapshot file.
207  *
208  * TODO optimize this using some kind of copy-on-write mechanism for
209  *      unchanged disk sections.
210  */
211 static void  nand_dev_save_disk_state(QEMUFile *f, nand_dev *dev)
212 {
213     int buf_size = NAND_DEV_SAVE_DISK_BUF_SIZE;
214     uint8_t buffer[NAND_DEV_SAVE_DISK_BUF_SIZE] = {0};
215     int ret;
216     uint64_t total_copied = 0;
217 
218     /* Size of file to restore, hence size of data block following.
219      * TODO Work out whether to use lseek64 here. */
220 
221     ret = do_lseek(dev->fd, 0, SEEK_END);
222     if (ret < 0) {
223       XLOG("%s EOF seek failed: %s\n", __FUNCTION__, strerror(errno));
224       qemu_file_set_error(f);
225       return;
226     }
227     const uint64_t total_size = ret;
228     qemu_put_be64(f, total_size);
229 
230     /* copy all data from the stream to the stored image */
231     ret = do_lseek(dev->fd, 0, SEEK_SET);
232     if (ret < 0) {
233         XLOG("%s seek failed: %s\n", __FUNCTION__, strerror(errno));
234         qemu_file_set_error(f);
235         return;
236     }
237     do {
238         ret = do_read(dev->fd, buffer, buf_size);
239         if (ret < 0) {
240             XLOG("%s read failed: %s\n", __FUNCTION__, strerror(errno));
241             qemu_file_set_error(f);
242             return;
243         }
244         qemu_put_buffer(f, buffer, ret);
245 
246         total_copied += ret;
247     }
248     while (ret == buf_size && total_copied < dev->max_size);
249 
250     /* TODO Maybe check that we've written total_size bytes */
251 }
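/* For reference (editorial note, not in the original source): the per-device
 * snapshot record produced above is simply
 *
 *     uint64_t  total_size;            // big-endian, via qemu_put_be64()
 *     uint8_t   contents[total_size];  // raw image bytes, via qemu_put_buffer()
 *
 * which is exactly what nand_dev_load_disk_state() consumes below.
 */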
252 
253 
254 /**
255  * Saves the state of all disks managed by this controller to a snapshot file.
256  */
257 static void nand_dev_save_disks(QEMUFile *f)
258 {
259     int i;
260     for (i = 0; i < nand_dev_count; i++) {
261         nand_dev_save_disk_state(f, nand_devs + i);
262     }
263 }
264 
265 /**
266  * Overwrites the contents of the disk image managed by this device with the
267  * contents as they were at the point the snapshot was made.
268  */
269 static int  nand_dev_load_disk_state(QEMUFile *f, nand_dev *dev)
270 {
271     int buf_size = NAND_DEV_SAVE_DISK_BUF_SIZE;
272     uint8_t buffer[NAND_DEV_SAVE_DISK_BUF_SIZE] = {0};
273     int ret;
274 
275     /* File size for restore and truncate */
276     uint64_t total_size = qemu_get_be64(f);
277     if (total_size > dev->max_size) {
278         XLOG("%s, restore failed: size required (%lld) exceeds device limit (%lld)\n",
279              __FUNCTION__, total_size, dev->max_size);
280         return -EIO;
281     }
282 
283     /* overwrite disk contents with snapshot contents */
284     uint64_t next_offset = 0;
285     ret = do_lseek(dev->fd, 0, SEEK_SET);
286     if (ret < 0) {
287         XLOG("%s seek failed: %s\n", __FUNCTION__, strerror(errno));
288         return -EIO;
289     }
290     while (next_offset < total_size) {
291         /* snapshot buffer may not be an exact multiple of buf_size
292          * if necessary, adjust buffer size for last copy operation */
293         if (total_size - next_offset < buf_size) {
294             buf_size = total_size - next_offset;
295         }
296 
297         ret = qemu_get_buffer(f, buffer, buf_size);
298         if (ret != buf_size) {
299             XLOG("%s read failed: expected %d bytes but got %d\n",
300                  __FUNCTION__, buf_size, ret);
301             return -EIO;
302         }
303         ret = do_write(dev->fd, buffer, buf_size);
304         if (ret != buf_size) {
305             XLOG("%s, write failed: %s\n", __FUNCTION__, strerror(errno));
306             return -EIO;
307         }
308 
309         next_offset += buf_size;
310     }
311 
312     ret = do_ftruncate(dev->fd, total_size);
313     if (ret < 0) {
314         XLOG("%s ftruncate failed: %s\n", __FUNCTION__, strerror(errno));
315         return -EIO;
316     }
317 
318     return 0;
319 }
320 
321 /**
322  * Restores the state of all disks managed by this driver from a snapshot file.
323  */
324 static int nand_dev_load_disks(QEMUFile *f)
325 {
326     int i, ret;
327     for (i = 0; i < nand_dev_count; i++) {
328         ret = nand_dev_load_disk_state(f, nand_devs + i);
329         if (ret)
330             return ret; // abort on error
331     }
332 
333     return 0;
334 }
335 
336 static void  nand_dev_controller_state_save(QEMUFile *f, void  *opaque)
337 {
338     nand_dev_controller_state* s = opaque;
339 
340     qemu_put_struct(f, nand_dev_controller_state_fields, s);
341 
342     /* The guest will continue writing to the disk image after the state has
343      * been saved. To guarantee that the state is identical after resume, save
344      * a copy of the current disk state in the snapshot.
345      */
346     nand_dev_save_disks(f);
347 }
348 
349 static int   nand_dev_controller_state_load(QEMUFile *f, void  *opaque, int  version_id)
350 {
351     nand_dev_controller_state*  s = opaque;
352     int ret;
353 
354     if (version_id != NAND_DEV_STATE_SAVE_VERSION)
355         return -1;
356 
357     if ((ret = qemu_get_struct(f, nand_dev_controller_state_fields, s)))
358         return ret;
359     if ((ret = nand_dev_load_disks(f)))
360         return ret;
361 
362     return 0;
363 }
364 
365 static uint32_t nand_dev_read_file(nand_dev *dev, uint32_t data, uint64_t addr, uint32_t total_len)
366 {
367     uint32_t len = total_len;
368     size_t read_len = dev->erase_size;
369     int eof = 0;
370 
371     NAND_UPDATE_READ_THRESHOLD(total_len);
372 
373     do_lseek(dev->fd, addr, SEEK_SET);
374     while(len > 0) {
375         if(read_len < dev->erase_size) {
376             memset(dev->data, 0xff, dev->erase_size);
377             read_len = dev->erase_size;
378             eof = 1;
379         }
380         if(len < read_len)
381             read_len = len;
382         if(!eof) {
383             read_len = do_read(dev->fd, dev->data, read_len);
384         }
385 #ifdef TARGET_I386
386         if (kvm_enabled())
387             cpu_synchronize_state(cpu_single_env, 0);
388 #endif
389         cpu_memory_rw_debug(cpu_single_env, data, dev->data, read_len, 1);
390         data += read_len;
391         len -= read_len;
392     }
393     return total_len;
394 }
395 
396 static uint32_t nand_dev_write_file(nand_dev *dev, uint32_t data, uint64_t addr, uint32_t total_len)
397 {
398     uint32_t len = total_len;
399     size_t write_len = dev->erase_size;
400     int ret;
401 
402     NAND_UPDATE_WRITE_THRESHOLD(total_len);
403 
404     do_lseek(dev->fd, addr, SEEK_SET);
405     while(len > 0) {
406         if(len < write_len)
407             write_len = len;
408 #ifdef TARGET_I386
409         if (kvm_enabled())
410                 cpu_synchronize_state(cpu_single_env, 0);
411 #endif
412         cpu_memory_rw_debug(cpu_single_env, data, dev->data, write_len, 0);
413         ret = do_write(dev->fd, dev->data, write_len);
414         if(ret < write_len) {
415             XLOG("nand_dev_write_file, write failed: %s\n", strerror(errno));
416             break;
417         }
418         data += write_len;
419         len -= write_len;
420     }
421     return total_len - len;
422 }
423 
424 static uint32_t nand_dev_erase_file(nand_dev *dev, uint64_t addr, uint32_t total_len)
425 {
426     uint32_t len = total_len;
427     size_t write_len = dev->erase_size;
428     int ret;
429 
430     do_lseek(dev->fd, addr, SEEK_SET);
431     memset(dev->data, 0xff, dev->erase_size);
432     while(len > 0) {
433         if(len < write_len)
434             write_len = len;
435         ret = do_write(dev->fd, dev->data, write_len);
436         if(ret < write_len) {
437             XLOG( "nand_dev_erase_file, write failed: %s\n", strerror(errno));
438             break;
439         }
440         len -= write_len;
441     }
442     return total_len - len;
443 }
444 
445 /* this is a huge hack required to make the PowerPC emulator binary usable
446  * on Mac OS X. If you define this function as 'static', the emulated kernel
447  * will panic when attempting to mount the /data partition.
448  *
449  * worse, if you do *not* define the function as static on Linux-x86, the
450  * emulated kernel will also panic !?
451  *
452  * I still wonder if this is a compiler bug, or due to some nasty thing the
453  * emulator does with CPU registers during execution of the translated code.
454  */
455 #if !(defined __APPLE__ && defined __powerpc__)
456 static
457 #endif
458 uint32_t nand_dev_do_cmd(nand_dev_controller_state *s, uint32_t cmd)
459 {
460     uint32_t size;
461     uint64_t addr;
462     nand_dev *dev;
463 
464     if (cmd == NAND_CMD_WRITE_BATCH || cmd == NAND_CMD_READ_BATCH ||
465         cmd == NAND_CMD_ERASE_BATCH) {
466         struct batch_data bd;
467         uint64_t bd_addr = ((uint64_t)s->batch_addr_high << 32) | s->batch_addr_low;
468 
469         cpu_physical_memory_read(bd_addr, (void*)&bd, sizeof(struct batch_data));
470         s->dev = bd.dev;
471         s->addr_low = bd.addr_low;
472         s->addr_high = bd.addr_high;
473         s->transfer_size = bd.transfer_size;
474         s->data = bd.data;
475     }
476     addr = s->addr_low | ((uint64_t)s->addr_high << 32);
477     size = s->transfer_size;
478     if(s->dev >= nand_dev_count)
479         return 0;
480     dev = nand_devs + s->dev;
481 
482     switch(cmd) {
483     case NAND_CMD_GET_DEV_NAME:
484         if(size > dev->devname_len)
485             size = dev->devname_len;
486 #ifdef TARGET_I386
487         if (kvm_enabled())
488                 cpu_synchronize_state(cpu_single_env, 0);
489 #endif
490         cpu_memory_rw_debug(cpu_single_env, s->data, (uint8_t*)dev->devname, size, 1);
491         return size;
492     case NAND_CMD_READ_BATCH:
493     case NAND_CMD_READ:
494         if(addr >= dev->max_size)
495             return 0;
496         if(size > dev->max_size - addr)
497             size = dev->max_size - addr;
498         if(dev->fd >= 0)
499             return nand_dev_read_file(dev, s->data, addr, size);
500 #ifdef TARGET_I386
501         if (kvm_enabled())
502                 cpu_synchronize_state(cpu_single_env, 0);
503 #endif
504         cpu_memory_rw_debug(cpu_single_env,s->data, &dev->data[addr], size, 1);
505         return size;
506     case NAND_CMD_WRITE_BATCH:
507     case NAND_CMD_WRITE:
508         if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
509             return 0;
510         if(addr >= dev->max_size)
511             return 0;
512         if(size > dev->max_size - addr)
513             size = dev->max_size - addr;
514         if(dev->fd >= 0)
515             return nand_dev_write_file(dev, s->data, addr, size);
516 #ifdef TARGET_I386
517         if (kvm_enabled())
518                 cpu_synchronize_state(cpu_single_env, 0);
519 #endif
520         cpu_memory_rw_debug(cpu_single_env,s->data, &dev->data[addr], size, 0);
521         return size;
522     case NAND_CMD_ERASE_BATCH:
523     case NAND_CMD_ERASE:
524         if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
525             return 0;
526         if(addr >= dev->max_size)
527             return 0;
528         if(size > dev->max_size - addr)
529             size = dev->max_size - addr;
530         if(dev->fd >= 0)
531             return nand_dev_erase_file(dev, addr, size);
532         memset(&dev->data[addr], 0xff, size);
533         return size;
534     case NAND_CMD_BLOCK_BAD_GET: // no bad block support
535         return 0;
536     case NAND_CMD_BLOCK_BAD_SET:
537         if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
538             return 0;
539         return 0;
540     default:
541         cpu_abort(cpu_single_env, "nand_dev_do_cmd: Bad command %x\n", cmd);
542         return 0;
543     }
544 }
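/* Editorial note: struct batch_data is defined in goldfish_nand_reg.h. Judging
 * only from the fields accessed in this file, it presumably looks roughly like
 * the following (shown for illustration, not authoritative):
 *
 *     struct batch_data {
 *         uint32_t dev;
 *         uint32_t addr_low;
 *         uint32_t addr_high;
 *         uint32_t transfer_size;
 *         uint32_t data;
 *         uint32_t result;
 *     };
 *
 * The guest stores one of these at (batch_addr_high:batch_addr_low); the
 * emulator reads it to obtain the transfer parameters and writes the result
 * back after the command completes (see the NAND_COMMAND case in
 * nand_dev_write()).
 */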
545 
546 /* I/O write */
547 static void nand_dev_write(void *opaque, target_phys_addr_t offset, uint32_t value)
548 {
549     nand_dev_controller_state *s = (nand_dev_controller_state *)opaque;
550 
551     switch (offset) {
552     case NAND_DEV:
553         s->dev = value;
554         if(s->dev >= nand_dev_count) {
555             cpu_abort(cpu_single_env, "nand_dev_write: Bad dev %x\n", value);
556         }
557         break;
558     case NAND_ADDR_HIGH:
559         s->addr_high = value;
560         break;
561     case NAND_ADDR_LOW:
562         s->addr_low = value;
563         break;
564     case NAND_BATCH_ADDR_LOW:
565         s->batch_addr_low = value;
566         break;
567     case NAND_BATCH_ADDR_HIGH:
568         s->batch_addr_high = value;
569         break;
570     case NAND_TRANSFER_SIZE:
571         s->transfer_size = value;
572         break;
573     case NAND_DATA:
574         s->data = value;
575         break;
576     case NAND_COMMAND:
577         s->result = nand_dev_do_cmd(s, value);
578         if (value == NAND_CMD_WRITE_BATCH || value == NAND_CMD_READ_BATCH ||
579             value == NAND_CMD_ERASE_BATCH) {
580             struct batch_data bd;
581             uint64_t bd_addr = ((uint64_t)s->batch_addr_high << 32) | s->batch_addr_low;
582             bd.result = s->result;
583             cpu_physical_memory_write(bd_addr, (void*)&bd, sizeof(struct batch_data));
584         }
585         break;
586     default:
587         cpu_abort(cpu_single_env, "nand_dev_write: Bad offset %x\n", offset);
588         break;
589     }
590 }
591 
592 /* I/O read */
593 static uint32_t nand_dev_read(void *opaque, target_phys_addr_t offset)
594 {
595     nand_dev_controller_state *s = (nand_dev_controller_state *)opaque;
596     nand_dev *dev;
597 
598     switch (offset) {
599     case NAND_VERSION:
600         return NAND_VERSION_CURRENT;
601     case NAND_NUM_DEV:
602         return nand_dev_count;
603     case NAND_RESULT:
604         return s->result;
605     }
606 
607     if(s->dev >= nand_dev_count)
608         return 0;
609 
610     dev = nand_devs + s->dev;
611 
612     switch (offset) {
613     case NAND_DEV_FLAGS:
614         return dev->flags;
615 
616     case NAND_DEV_NAME_LEN:
617         return dev->devname_len;
618 
619     case NAND_DEV_PAGE_SIZE:
620         return dev->page_size;
621 
622     case NAND_DEV_EXTRA_SIZE:
623         return dev->extra_size;
624 
625     case NAND_DEV_ERASE_SIZE:
626         return dev->erase_size;
627 
628     case NAND_DEV_SIZE_LOW:
629         return (uint32_t)dev->max_size;
630 
631     case NAND_DEV_SIZE_HIGH:
632         return (uint32_t)(dev->max_size >> 32);
633 
634     default:
635         cpu_abort(cpu_single_env, "nand_dev_read: Bad offset %x\n", offset);
636         return 0;
637     }
638 }
639 
640 static CPUReadMemoryFunc *nand_dev_readfn[] = {
641    nand_dev_read,
642    nand_dev_read,
643    nand_dev_read
644 };
645 
646 static CPUWriteMemoryFunc *nand_dev_writefn[] = {
647    nand_dev_write,
648    nand_dev_write,
649    nand_dev_write
650 };
651 
652 /* initialize the NAND controller device */
653 void nand_dev_init(uint32_t base)
654 {
655     int iomemtype;
656     static int  instance_id = 0;
657     nand_dev_controller_state *s;
658 
659     s = (nand_dev_controller_state *)qemu_mallocz(sizeof(nand_dev_controller_state));
660     iomemtype = cpu_register_io_memory(nand_dev_readfn, nand_dev_writefn, s);
661     cpu_register_physical_memory(base, 0x00000fff, iomemtype);
662     s->base = base;
663 
664     register_savevm( "nand_dev", instance_id++, NAND_DEV_STATE_SAVE_VERSION,
665                       nand_dev_controller_state_save, nand_dev_controller_state_load, s);
666 }
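/* Editorial usage sketch: the board setup code is expected to call
 * nand_dev_init() once with the MMIO base assigned to the NAND controller,
 * e.g. (the address here is hypothetical; the real value comes from the
 * goldfish board code):
 *
 *     nand_dev_init(0xff010000);
 *
 * Individual images are then attached with nand_add_dev() below.
 */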
667 
668 static int arg_match(const char *a, const char *b, size_t b_len)
669 {
670     while(*a && b_len--) {
671         if(*a++ != *b++)
672             return 0;
673     }
674     return b_len == 0;
675 }
676 
677 void nand_add_dev(const char *arg)
678 {
679     uint64_t dev_size = 0;
680     const char *next_arg;
681     const char *value;
682     size_t arg_len, value_len;
683     nand_dev *new_devs, *dev;
684     char *devname = NULL;
685     size_t devname_len = 0;
686     char *initfilename = NULL;
687     char *rwfilename = NULL;
688     int initfd = -1;
689     int rwfd = -1;
690     int read_only = 0;
691     int pad;
692     ssize_t read_size;
693     uint32_t page_size = 2048;
694     uint32_t extra_size = 64;
695     uint32_t erase_pages = 64;
696 
697     VERBOSE_PRINT(init, "%s: %s", __FUNCTION__, arg);
698 
699     while(arg) {
700         next_arg = strchr(arg, ',');
701         value = strchr(arg, '=');
702         if(next_arg != NULL) {
703             arg_len = next_arg - arg;
704             next_arg++;
705             if(value >= next_arg)
706                 value = NULL;
707         }
708         else
709             arg_len = strlen(arg);
710         if(value != NULL) {
711             size_t new_arg_len = value - arg;
712             value_len = arg_len - new_arg_len - 1;
713             arg_len = new_arg_len;
714             value++;
715         }
716         else
717             value_len = 0;
718 
719         if(devname == NULL) {
720             if(value != NULL)
721                 goto bad_arg_and_value;
722             devname_len = arg_len;
723             devname = malloc(arg_len+1);
724             if(devname == NULL)
725                 goto out_of_memory;
726             memcpy(devname, arg, arg_len);
727             devname[arg_len] = 0;
728         }
729         else if(value == NULL) {
730             if(arg_match("readonly", arg, arg_len)) {
731                 read_only = 1;
732             }
733             else {
734                 XLOG("bad arg: %.*s\n", arg_len, arg);
735                 exit(1);
736             }
737         }
738         else {
739             if(arg_match("size", arg, arg_len)) {
740                 char *ep;
741                 dev_size = strtoull(value, &ep, 0);
742                 if(ep != value + value_len)
743                     goto bad_arg_and_value;
744             }
745             else if(arg_match("pagesize", arg, arg_len)) {
746                 char *ep;
747                 page_size = strtoul(value, &ep, 0);
748                 if(ep != value + value_len)
749                     goto bad_arg_and_value;
750             }
751             else if(arg_match("extrasize", arg, arg_len)) {
752                 char *ep;
753                 extra_size = strtoul(value, &ep, 0);
754                 if(ep != value + value_len)
755                     goto bad_arg_and_value;
756             }
757             else if(arg_match("erasepages", arg, arg_len)) {
758                 char *ep;
759                 erase_pages = strtoul(value, &ep, 0);
760                 if(ep != value + value_len)
761                     goto bad_arg_and_value;
762             }
763             else if(arg_match("initfile", arg, arg_len)) {
764                 initfilename = malloc(value_len + 1);
765                 if(initfilename == NULL)
766                     goto out_of_memory;
767                 memcpy(initfilename, value, value_len);
768                 initfilename[value_len] = '\0';
769             }
770             else if(arg_match("file", arg, arg_len)) {
771                 rwfilename = malloc(value_len + 1);
772                 if(rwfilename == NULL)
773                     goto out_of_memory;
774                 memcpy(rwfilename, value, value_len);
775                 rwfilename[value_len] = '\0';
776             }
777             else {
778                 goto bad_arg_and_value;
779             }
780         }
781 
782         arg = next_arg;
783     }
784 
785     if (rwfilename == NULL) {
786         /* we create a temporary file to store everything */
787         TempFile*    tmp = tempfile_create();
788 
789         if (tmp == NULL) {
790             XLOG("could not create temp file for %.*s NAND disk image: %s\n",
791                   devname_len, devname, strerror(errno));
792             exit(1);
793         }
794         rwfilename = (char*) tempfile_path(tmp);
795         if (VERBOSE_CHECK(init))
796             dprint( "mapping '%.*s' NAND image to %s", devname_len, devname, rwfilename);
797     }
798 
799     if(rwfilename) {
800         if (initfilename) {
801             /* Overwrite with content of the 'initfilename'. */
802             if (read_only) {
803                 /* Cannot be readonly when initializing the device from another file. */
804                 XLOG("incompatible read only option is requested while initializing %.*s from %s\n",
805                      devname_len, devname, initfilename);
806                 exit(1);
807             }
808             rwfd = open(rwfilename, O_BINARY | O_TRUNC | O_RDWR);
809         } else {
810             rwfd = open(rwfilename, O_BINARY | (read_only ? O_RDONLY : O_RDWR));
811         }
812         if(rwfd < 0) {
813             XLOG("could not open file %s, %s\n", rwfilename, strerror(errno));
814             exit(1);
815         }
816         /* this could be a writable temporary file. use atexit_close_fd to ensure
817          * that it is properly cleaned up at exit on Win32
818          */
819         if (!read_only)
820             atexit_close_fd(rwfd);
821     }
822 
823     if(initfilename) {
824         initfd = open(initfilename, O_BINARY | O_RDONLY);
825         if(initfd < 0) {
826             XLOG("could not open file %s, %s\n", initfilename, strerror(errno));
827             exit(1);
828         }
829         if(dev_size == 0) {
830             dev_size = do_lseek(initfd, 0, SEEK_END);
831             do_lseek(initfd, 0, SEEK_SET);
832         }
833     }
834 
835     new_devs = realloc(nand_devs, sizeof(nand_devs[0]) * (nand_dev_count + 1));
836     if(new_devs == NULL)
837         goto out_of_memory;
838     nand_devs = new_devs;
839     dev = &new_devs[nand_dev_count];
840 
841     dev->page_size = page_size;
842     dev->extra_size = extra_size;
843     dev->erase_size = erase_pages * (page_size + extra_size);
844     pad = dev_size % dev->erase_size;
845     if (pad != 0) {
846         dev_size += (dev->erase_size - pad);
847         D("rounding devsize up to a full eraseunit, now %llx\n", dev_size);
848     }
849     dev->devname = devname;
850     dev->devname_len = devname_len;
851     dev->max_size = dev_size;
852     dev->data = malloc(dev->erase_size);
853     if(dev->data == NULL)
854         goto out_of_memory;
855     dev->flags = read_only ? NAND_DEV_FLAG_READ_ONLY : 0;
856 #ifdef TARGET_I386
857     dev->flags |= NAND_DEV_FLAG_BATCH_CAP;
858 #endif
859 
860     if (initfd >= 0) {
861         do {
862             read_size = do_read(initfd, dev->data, dev->erase_size);
863             if(read_size < 0) {
864                 XLOG("could not read file %s, %s\n", initfilename, strerror(errno));
865                 exit(1);
866             }
867             if(do_write(rwfd, dev->data, read_size) != read_size) {
868                 XLOG("could not write file %s, %s\n", rwfilename, strerror(errno));
869                 exit(1);
870             }
871         } while(read_size == dev->erase_size);
872         close(initfd);
873     }
874     dev->fd = rwfd;
875 
876     nand_dev_count++;
877 
878     return;
879 
880 out_of_memory:
881     XLOG("out of memory\n");
882     exit(1);
883 
884 bad_arg_and_value:
885     XLOG("bad arg: %.*s=%.*s\n", arg_len, arg, value_len, value);
886     exit(1);
887 }
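/* Editorial note: based on the parsing loop above, the argument string
 * accepted by nand_add_dev() has the form
 *
 *     <devname>[,readonly][,size=<n>][,pagesize=<n>][,extrasize=<n>]
 *              [,erasepages=<n>][,initfile=<path>][,file=<path>]
 *
 * where numeric values are parsed with strtoull/strtoul in base 0, so 0x...
 * hex values are accepted. A hypothetical example:
 *
 *     nand_add_dev("system,size=0x20000000,initfile=system.img");
 */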
888 
889 #ifdef CONFIG_NAND_LIMITS
890 
891 static uint64_t
892 parse_nand_rw_limit( const char*  value )
893 {
894     char*     end;
895     uint64_t  val = strtoul( value, &end, 0 );
896 
897     if (end == value) {
898         derror( "bad parameter value '%s': expecting unsigned integer", value );
899         exit(1);
900     }
901 
902     switch (end[0]) {
903         case 'K':  val <<= 10; break;
904         case 'M':  val <<= 20; break;
905         case 'G':  val <<= 30; break;
906         case 0: break;
907         default:
908             derror( "bad read/write limit suffix: use K, M or G" );
909             exit(1);
910     }
911     return val;
912 }
913 
914 void
915 parse_nand_limits(char*  limits)
916 {
917     int      pid = -1, signal = -1;
918     int64_t  reads = 0, writes = 0;
919     char*    item = limits;
920 
921     /* parse over comma-separated items */
922     while (item && *item) {
923         char*  next = strchr(item, ',');
924         char*  end;
925 
926         if (next == NULL) {
927             next = item + strlen(item);
928         } else {
929             *next++ = 0;
930         }
931 
932         if ( !memcmp(item, "pid=", 4) ) {
933             pid = strtol(item+4, &end, 10);
934             if (end == NULL || *end) {
935                 derror( "bad parameter, expecting pid=<number>, got '%s'",
936                         item );
937                 exit(1);
938             }
939             if (pid <= 0) {
940                 derror( "bad parameter: process identifier must be > 0" );
941                 exit(1);
942             }
943         }
944         else if ( !memcmp(item, "signal=", 7) ) {
945             signal = strtol(item+7,&end, 10);
946             if (end == NULL || *end) {
947                 derror( "bad parameter: expecting signal=<number>, got '%s'",
948                         item );
949                 exit(1);
950             }
951             if (signal <= 0) {
952                 derror( "bad parameter: signal number must be > 0" );
953                 exit(1);
954             }
955         }
956         else if ( !memcmp(item, "reads=", 6) ) {
957             reads = parse_nand_rw_limit(item+6);
958         }
959         else if ( !memcmp(item, "writes=", 7) ) {
960             writes = parse_nand_rw_limit(item+7);
961         }
962         else {
963             derror( "bad parameter '%s' (see -help-nand-limits)", item );
964             exit(1);
965         }
966         item = next;
967     }
968     if (pid < 0) {
969         derror( "bad parameter: missing pid=<number>" );
970         exit(1);
971     }
972     else if (signal < 0) {
973         derror( "bad parameter: missing signal=<number>" );
974         exit(1);
975     }
976     else if (reads == 0 && writes == 0) {
977         dwarning( "no read or write limit specified. ignoring -nand-limits" );
978     } else {
979         nand_threshold*  t;
980 
981         t  = &android_nand_read_threshold;
982         t->pid     = pid;
983         t->signal  = signal;
984         t->counter = 0;
985         t->limit   = reads;
986 
987         t  = &android_nand_write_threshold;
988         t->pid     = pid;
989         t->signal  = signal;
990         t->counter = 0;
991         t->limit   = writes;
992     }
993 }
994 #endif /* CONFIG_NAND_LIMITS */
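/* Editorial note: parse_nand_limits() above expects a comma-separated list of
 * pid=<number>, signal=<number>, reads=<limit> and writes=<limit>, where the
 * limits take an optional K/M/G suffix. A hypothetical example:
 *
 *     parse_nand_limits("pid=1234,signal=10,reads=16M,writes=16M");
 *
 * When either counter reaches its limit, the given signal is sent to the given
 * pid (see nand_threshold_update() near the top of this file).
 */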
995