/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "EncryptInplace.h"

#include <ext4_utils/ext4.h>
#include <ext4_utils/ext4_utils.h>
#include <f2fs_sparseblock.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include <algorithm>

#include <android-base/logging.h>
#include <android-base/properties.h>

// HORRIBLE HACK, FIXME
#include "cryptfs.h"

// FIXME horrible cut-and-paste code
static inline int unix_read(int fd, void* buff, int len) {
    return TEMP_FAILURE_RETRY(read(fd, buff, len));
}

static inline int unix_write(int fd, const void* buff, int len) {
    return TEMP_FAILURE_RETRY(write(fd, buff, len));
}

#define CRYPT_SECTORS_PER_BUFSIZE (CRYPT_INPLACE_BUFSIZE / CRYPT_SECTOR_SIZE)

/* Aligned 32K writes tend to make flash happy.
 * The SD Card Association recommends it.
 */
#ifndef CONFIG_HW_DISK_ENCRYPTION
#define BLOCKS_AT_A_TIME 8
#else
#define BLOCKS_AT_A_TIME 1024
#endif

struct encryptGroupsData {
    int realfd;
    int cryptofd;
    off64_t numblocks;
    off64_t one_pct, cur_pct, new_pct;
    off64_t blocks_already_done, tot_numblocks;
    off64_t used_blocks_already_done, tot_used_blocks;
    const char* real_blkdev;
    const char* crypto_blkdev;
    int count;
    off64_t offset;
    char* buffer;
    off64_t last_written_sector;
    int completed;
    time_t time_started;
    int remaining_time;
    bool set_progress_properties;
};

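// Increment the per-block progress counters and, when set_progress_properties
// is true, publish the percentage complete and an estimated time remaining via
// the vold.encrypt_progress and vold.encrypt_time_remaining properties.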
static void update_progress(struct encryptGroupsData* data, int is_used) {
    data->blocks_already_done++;

    if (is_used) {
        data->used_blocks_already_done++;
    }
    if (data->tot_used_blocks) {
        data->new_pct = data->used_blocks_already_done / data->one_pct;
    } else {
        data->new_pct = data->blocks_already_done / data->one_pct;
    }

    if (!data->set_progress_properties) return;

    if (data->new_pct > data->cur_pct) {
        char buf[8];
        data->cur_pct = data->new_pct;
        snprintf(buf, sizeof(buf), "%" PRId64, data->cur_pct);
        android::base::SetProperty("vold.encrypt_progress", buf);
    }

    if (data->cur_pct >= 5) {
        struct timespec time_now;
        if (clock_gettime(CLOCK_MONOTONIC, &time_now)) {
            LOG(WARNING) << "Error getting time";
        } else {
            double elapsed_time = difftime(time_now.tv_sec, data->time_started);
            off64_t remaining_blocks = data->tot_used_blocks - data->used_blocks_already_done;
            int remaining_time =
                (int)(elapsed_time * remaining_blocks / data->used_blocks_already_done);

            // Change time only if not yet set, lower, or a lot higher for
            // best user experience
            if (data->remaining_time == -1 || remaining_time < data->remaining_time ||
                remaining_time > data->remaining_time + 60) {
                char buf[8];
                snprintf(buf, sizeof(buf), "%d", remaining_time);
                android::base::SetProperty("vold.encrypt_time_remaining", buf);
                data->remaining_time = remaining_time;
            }
        }
    }
}

static void log_progress(struct encryptGroupsData const* data, bool completed) {
    // Precondition: if completed, data == 0; otherwise data != 0.

    // Track progress so we can skip logging blocks
    static off64_t offset = -1;

    // Need to close existing 'Encrypting from' log?
    if (completed || (offset != -1 && data->offset != offset)) {
        LOG(INFO) << "Encrypted to sector " << offset / info.block_size * CRYPT_SECTOR_SIZE;
        offset = -1;
    }

    // Need to start new 'Encrypting from' log?
    if (!completed && offset != data->offset) {
        LOG(INFO) << "Encrypting from sector " << data->offset / info.block_size * CRYPT_SECTOR_SIZE;
    }

    // Update offset
    if (!completed) {
        offset = data->offset + (off64_t)data->count * info.block_size;
    }
}

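// Copy the run of data->count blocks starting at data->offset from the real
// block device to the crypto block device, then reset the run.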
static int flush_outstanding_data(struct encryptGroupsData* data) {
    if (data->count == 0) {
        return 0;
    }

    LOG(DEBUG) << "Copying " << data->count << " blocks at offset " << data->offset;

    if (pread64(data->realfd, data->buffer, info.block_size * data->count, data->offset) <= 0) {
        LOG(ERROR) << "Error reading real_blkdev " << data->real_blkdev << " for inplace encrypt";
        return -1;
    }

    if (pwrite64(data->cryptofd, data->buffer, info.block_size * data->count, data->offset) <= 0) {
        LOG(ERROR) << "Error writing crypto_blkdev " << data->crypto_blkdev
                   << " for inplace encrypt";
        return -1;
    } else {
        log_progress(data, false);
    }

    data->count = 0;
    data->last_written_sector =
        (data->offset + data->count) / info.block_size * CRYPT_SECTOR_SIZE - 1;
    return 0;
}

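// Walk every ext4 block group, read its block bitmap, and copy only the blocks
// that are in use through the crypto device, batching up to BLOCKS_AT_A_TIME
// blocks per write.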
static int encrypt_groups(struct encryptGroupsData* data) {
    unsigned int i;
    u8* block_bitmap = 0;
    unsigned int block;
    off64_t ret;
    int rc = -1;

    data->buffer = (char*)malloc(info.block_size * BLOCKS_AT_A_TIME);
    if (!data->buffer) {
        LOG(ERROR) << "Failed to allocate crypto buffer";
        goto errout;
    }

    block_bitmap = (u8*)malloc(info.block_size);
    if (!block_bitmap) {
        LOG(ERROR) << "failed to allocate block bitmap";
        goto errout;
    }

    for (i = 0; i < aux_info.groups; ++i) {
        LOG(INFO) << "Encrypting group " << i;

        u32 first_block = aux_info.first_data_block + i * info.blocks_per_group;
        u32 block_count = std::min(info.blocks_per_group, (u32)(aux_info.len_blocks - first_block));

        off64_t offset = (u64)info.block_size * aux_info.bg_desc[i].bg_block_bitmap;

        ret = pread64(data->realfd, block_bitmap, info.block_size, offset);
        if (ret != (int)info.block_size) {
            LOG(ERROR) << "failed to read all of block group bitmap " << i;
            goto errout;
        }

        offset = (u64)info.block_size * first_block;

        data->count = 0;

        for (block = 0; block < block_count; block++) {
            int used;

            if (aux_info.bg_desc[i].bg_flags & EXT4_BG_BLOCK_UNINIT) {
                // In block groups with an uninitialized block bitmap, we only
                // need to encrypt the backup superblock (if one is present).
                used = (ext4_bg_has_super_block(i) && block < 1 + aux_info.bg_desc_blocks);
            } else {
                used = bitmap_get_bit(block_bitmap, block);
            }

            update_progress(data, used);
            if (used) {
                if (data->count == 0) {
                    data->offset = offset;
                }
                data->count++;
            } else {
                if (flush_outstanding_data(data)) {
                    goto errout;
                }
            }

            offset += info.block_size;

            /* Write data if we are aligned or buffer size reached */
            if (offset % (info.block_size * BLOCKS_AT_A_TIME) == 0 ||
                data->count == BLOCKS_AT_A_TIME) {
                if (flush_outstanding_data(data)) {
                    goto errout;
                }
            }
        }
        if (flush_outstanding_data(data)) {
            goto errout;
        }
    }

    data->completed = 1;
    rc = 0;

errout:
    log_progress(0, true);
    free(data->buffer);
    free(block_bitmap);
    return rc;
}

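// In-place encryption fast path for ext4: parse the filesystem metadata so
// that only blocks in use are copied through the crypto block device.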
static int cryptfs_enable_inplace_ext4(const char* crypto_blkdev, const char* real_blkdev,
                                       off64_t size, off64_t* size_already_done, off64_t tot_size,
                                       off64_t previously_encrypted_upto,
                                       bool set_progress_properties) {
    u32 i;
    struct encryptGroupsData data;
    int rc;  // Can't initialize without causing warning -Wclobbered
    int retries = RETRY_MOUNT_ATTEMPTS;
    struct timespec time_started = {0};

    if (previously_encrypted_upto > *size_already_done) {
        LOG(DEBUG) << "Not fast encrypting since resuming part way through";
        return -1;
    }

    memset(&data, 0, sizeof(data));
    data.real_blkdev = real_blkdev;
    data.crypto_blkdev = crypto_blkdev;
    data.set_progress_properties = set_progress_properties;

    LOG(DEBUG) << "Opening " << real_blkdev;
    if ((data.realfd = open(real_blkdev, O_RDWR | O_CLOEXEC)) < 0) {
        PLOG(ERROR) << "Error opening real_blkdev " << real_blkdev << " for inplace encrypt";
        rc = -1;
        goto errout;
    }

    LOG(DEBUG) << "Opening " << crypto_blkdev;
    // Wait until the block device appears.  Re-use the mount retry values since it is reasonable.
    while ((data.cryptofd = open(crypto_blkdev, O_WRONLY | O_CLOEXEC)) < 0) {
        if (--retries) {
            PLOG(ERROR) << "Error opening crypto_blkdev " << crypto_blkdev
                        << " for ext4 inplace encrypt, retrying";
            sleep(RETRY_MOUNT_DELAY_SECONDS);
        } else {
            PLOG(ERROR) << "Error opening crypto_blkdev " << crypto_blkdev
                        << " for ext4 inplace encrypt";
            rc = ENABLE_INPLACE_ERR_DEV;
            goto errout;
        }
    }

    if (setjmp(setjmp_env)) {  // NOLINT
        LOG(ERROR) << "Reading ext4 extent caused an exception";
        rc = -1;
        goto errout;
    }

    if (read_ext(data.realfd, 0) != 0) {
        LOG(ERROR) << "Failed to read ext4 extent";
        rc = -1;
        goto errout;
    }

    data.numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    data.tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    data.blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    LOG(INFO) << "Encrypting ext4 filesystem in place...";

    data.tot_used_blocks = data.numblocks;
    for (i = 0; i < aux_info.groups; ++i) {
        data.tot_used_blocks -= aux_info.bg_desc[i].bg_free_blocks_count;
    }

    data.one_pct = data.tot_used_blocks / 100;
    data.cur_pct = 0;

    if (clock_gettime(CLOCK_MONOTONIC, &time_started)) {
        LOG(WARNING) << "Error getting time at start";
        // Note - continue anyway - we'll run with 0
    }
    data.time_started = time_started.tv_sec;
    data.remaining_time = -1;

    rc = encrypt_groups(&data);
    if (rc) {
        LOG(ERROR) << "Error encrypting groups";
        goto errout;
    }

    *size_already_done += data.completed ? size : data.last_written_sector;
    rc = 0;

errout:
    close(data.realfd);
    close(data.cryptofd);

    return rc;
}

static void log_progress_f2fs(u64 block, bool completed) {
    // Precondition: if completed, block == 0; otherwise block != 0.

    // Track progress so we can skip logging blocks
    static u64 last_block = (u64)-1;

    // Need to close existing 'Encrypting from' log?
    if (completed || (last_block != (u64)-1 && block != last_block + 1)) {
        LOG(INFO) << "Encrypted to block " << last_block;
        last_block = -1;
    }

    // Need to start new 'Encrypting from' log?
    if (!completed && (last_block == (u64)-1 || block != last_block + 1)) {
        LOG(INFO) << "Encrypting from block " << block;
    }

    // Update offset
    if (!completed) {
        last_block = block;
    }
}

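// Callback invoked by run_on_used_blocks() for each in-use f2fs block: read
// the block from the real device and write it through the crypto device.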
static int encrypt_one_block_f2fs(u64 pos, void* data) {
    struct encryptGroupsData* priv_dat = (struct encryptGroupsData*)data;

    priv_dat->blocks_already_done = pos - 1;
    update_progress(priv_dat, 1);

    off64_t offset = pos * CRYPT_INPLACE_BUFSIZE;

    if (pread64(priv_dat->realfd, priv_dat->buffer, CRYPT_INPLACE_BUFSIZE, offset) <= 0) {
        LOG(ERROR) << "Error reading real_blkdev " << priv_dat->real_blkdev
                   << " for f2fs inplace encrypt";
        return -1;
    }

    if (pwrite64(priv_dat->cryptofd, priv_dat->buffer, CRYPT_INPLACE_BUFSIZE, offset) <= 0) {
        LOG(ERROR) << "Error writing crypto_blkdev " << priv_dat->crypto_blkdev
                   << " for f2fs inplace encrypt";
        return -1;
    } else {
        log_progress_f2fs(pos, false);
    }

    return 0;
}

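// In-place encryption fast path for f2fs: use f2fs_sparseblock to visit only
// the blocks the filesystem reports as in use.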
static int cryptfs_enable_inplace_f2fs(const char* crypto_blkdev, const char* real_blkdev,
                                       off64_t size, off64_t* size_already_done, off64_t tot_size,
                                       off64_t previously_encrypted_upto,
                                       bool set_progress_properties) {
    struct encryptGroupsData data;
    struct f2fs_info* f2fs_info = NULL;
    int rc = ENABLE_INPLACE_ERR_OTHER;
    struct timespec time_started = {0};

    if (previously_encrypted_upto > *size_already_done) {
        LOG(DEBUG) << "Not fast encrypting since resuming part way through";
        return ENABLE_INPLACE_ERR_OTHER;
    }
    memset(&data, 0, sizeof(data));
    data.real_blkdev = real_blkdev;
    data.crypto_blkdev = crypto_blkdev;
    data.set_progress_properties = set_progress_properties;
    data.realfd = -1;
    data.cryptofd = -1;
    if ((data.realfd = open64(real_blkdev, O_RDWR | O_CLOEXEC)) < 0) {
        PLOG(ERROR) << "Error opening real_blkdev " << real_blkdev << " for f2fs inplace encrypt";
        goto errout;
    }
    if ((data.cryptofd = open64(crypto_blkdev, O_WRONLY | O_CLOEXEC)) < 0) {
        PLOG(ERROR) << "Error opening crypto_blkdev " << crypto_blkdev
                    << " for f2fs inplace encrypt";
        rc = ENABLE_INPLACE_ERR_DEV;
        goto errout;
    }

    f2fs_info = generate_f2fs_info(data.realfd);
    if (!f2fs_info) goto errout;

    data.numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    data.tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    data.blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    data.tot_used_blocks = get_num_blocks_used(f2fs_info);

    data.one_pct = data.tot_used_blocks / 100;
    data.cur_pct = 0;
    if (clock_gettime(CLOCK_MONOTONIC, &time_started)) {
        LOG(WARNING) << "Error getting time at start";
        // Note - continue anyway - we'll run with 0
    }
    data.time_started = time_started.tv_sec;
    data.remaining_time = -1;

    data.buffer = (char*)malloc(f2fs_info->block_size);
    if (!data.buffer) {
        LOG(ERROR) << "Failed to allocate crypto buffer";
        goto errout;
    }

    data.count = 0;

    /* Currently, this either runs to completion, or hits a nonrecoverable error */
    rc = run_on_used_blocks(data.blocks_already_done, f2fs_info, &encrypt_one_block_f2fs, &data);

    if (rc) {
        LOG(ERROR) << "Error in running over f2fs blocks";
        rc = ENABLE_INPLACE_ERR_OTHER;
        goto errout;
    }

    *size_already_done += size;
    rc = 0;

errout:
    if (rc) LOG(ERROR) << "Failed to encrypt f2fs filesystem on " << real_blkdev;

    log_progress_f2fs(0, true);
    free(f2fs_info);
    free(data.buffer);
    close(data.realfd);
    close(data.cryptofd);

    return rc;
}

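// Fallback path: copy every sector of the partition through the crypto device,
// regardless of filesystem contents.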
static int cryptfs_enable_inplace_full(const char* crypto_blkdev, const char* real_blkdev,
                                       off64_t size, off64_t* size_already_done, off64_t tot_size,
                                       off64_t previously_encrypted_upto,
                                       bool set_progress_properties) {
    int realfd, cryptofd;
    char buf[CRYPT_INPLACE_BUFSIZE];
    int rc = ENABLE_INPLACE_ERR_OTHER;
    off64_t numblocks, i, remainder;
    off64_t one_pct, cur_pct, new_pct;
    off64_t blocks_already_done, tot_numblocks;

    if ((realfd = open(real_blkdev, O_RDONLY | O_CLOEXEC)) < 0) {
        PLOG(ERROR) << "Error opening real_blkdev " << real_blkdev << " for inplace encrypt";
        return ENABLE_INPLACE_ERR_OTHER;
    }

    if ((cryptofd = open(crypto_blkdev, O_WRONLY | O_CLOEXEC)) < 0) {
        PLOG(ERROR) << "Error opening crypto_blkdev " << crypto_blkdev << " for inplace encrypt";
        close(realfd);
        return ENABLE_INPLACE_ERR_DEV;
    }

    /* This is pretty much a simple loop of reading 4K, and writing 4K.
     * The size passed in is the number of 512 byte sectors in the filesystem.
     * So compute the number of whole 4K blocks we should read/write,
     * and the remainder.
     */
    numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    remainder = size % CRYPT_SECTORS_PER_BUFSIZE;
    tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    LOG(ERROR) << "Encrypting filesystem in place...";

    i = previously_encrypted_upto + 1 - *size_already_done;

    if (lseek64(realfd, i * CRYPT_SECTOR_SIZE, SEEK_SET) < 0) {
        PLOG(ERROR) << "Cannot seek to previously encrypted point on " << real_blkdev;
        goto errout;
    }

    if (lseek64(cryptofd, i * CRYPT_SECTOR_SIZE, SEEK_SET) < 0) {
        PLOG(ERROR) << "Cannot seek to previously encrypted point on " << crypto_blkdev;
        goto errout;
    }

    for (; i < size && i % CRYPT_SECTORS_PER_BUFSIZE != 0; ++i) {
        if (unix_read(realfd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            PLOG(ERROR) << "Error reading initial sectors from real_blkdev " << real_blkdev
                        << " for inplace encrypt";
            goto errout;
        }
        if (unix_write(cryptofd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            PLOG(ERROR) << "Error writing initial sectors to crypto_blkdev " << crypto_blkdev
                        << " for inplace encrypt";
            goto errout;
        } else {
            LOG(INFO) << "Encrypted 1 block at " << i;
        }
    }

    one_pct = tot_numblocks / 100;
    cur_pct = 0;
    /* process the majority of the filesystem in blocks */
    for (i /= CRYPT_SECTORS_PER_BUFSIZE; i < numblocks; i++) {
        new_pct = (i + blocks_already_done) / one_pct;
        if (set_progress_properties && new_pct > cur_pct) {
            char property_buf[8];

            cur_pct = new_pct;
            snprintf(property_buf, sizeof(property_buf), "%" PRId64, cur_pct);
            android::base::SetProperty("vold.encrypt_progress", property_buf);
        }
        if (unix_read(realfd, buf, CRYPT_INPLACE_BUFSIZE) <= 0) {
            PLOG(ERROR) << "Error reading real_blkdev " << real_blkdev << " for inplace encrypt";
            goto errout;
        }
        if (unix_write(cryptofd, buf, CRYPT_INPLACE_BUFSIZE) <= 0) {
            PLOG(ERROR) << "Error writing crypto_blkdev " << crypto_blkdev << " for inplace encrypt";
            goto errout;
        } else {
            LOG(DEBUG) << "Encrypted " << CRYPT_SECTORS_PER_BUFSIZE << " block at "
                       << i * CRYPT_SECTORS_PER_BUFSIZE;
        }
    }

    /* Do any remaining sectors */
    for (i = 0; i < remainder; i++) {
        if (unix_read(realfd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            LOG(ERROR) << "Error reading final sectors from real_blkdev " << real_blkdev
                       << " for inplace encrypt";
            goto errout;
        }
        if (unix_write(cryptofd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            LOG(ERROR) << "Error writing final sectors to crypto_blkdev " << crypto_blkdev
                       << " for inplace encrypt";
            goto errout;
        } else {
            LOG(INFO) << "Encrypted 1 block at next location";
        }
    }

    *size_already_done += size;
    rc = 0;

errout:
    close(realfd);
    close(cryptofd);

    return rc;
}

/* Returns one of the ENABLE_INPLACE_* return codes. */
int cryptfs_enable_inplace(const char* crypto_blkdev, const char* real_blkdev, off64_t size,
                           off64_t* size_already_done, off64_t tot_size,
                           off64_t previously_encrypted_upto, bool set_progress_properties) {
    int rc_ext4, rc_f2fs, rc_full;
    LOG(DEBUG) << "cryptfs_enable_inplace(" << crypto_blkdev << ", " << real_blkdev << ", " << size
               << ", " << size_already_done << ", " << tot_size << ", " << previously_encrypted_upto
               << ", " << set_progress_properties << ")";
    if (previously_encrypted_upto) {
        LOG(DEBUG) << "Continuing encryption from " << previously_encrypted_upto;
    }

    if (*size_already_done + size < previously_encrypted_upto) {
        LOG(DEBUG) << "cryptfs_enable_inplace already done";
        *size_already_done += size;
        return 0;
    }

    /* TODO: identify the filesystem type.
     * As is, cryptfs_enable_inplace_ext4 will fail on an f2fs partition, and
     * then we will drop down to cryptfs_enable_inplace_f2fs.
     */
    if ((rc_ext4 = cryptfs_enable_inplace_ext4(crypto_blkdev, real_blkdev, size, size_already_done,
                                               tot_size, previously_encrypted_upto,
                                               set_progress_properties)) == 0) {
        LOG(DEBUG) << "cryptfs_enable_inplace_ext4 success";
        return 0;
    }
    LOG(DEBUG) << "cryptfs_enable_inplace_ext4()=" << rc_ext4;

    if ((rc_f2fs = cryptfs_enable_inplace_f2fs(crypto_blkdev, real_blkdev, size, size_already_done,
                                               tot_size, previously_encrypted_upto,
                                               set_progress_properties)) == 0) {
        LOG(DEBUG) << "cryptfs_enable_inplace_f2fs success";
        return 0;
    }
    LOG(DEBUG) << "cryptfs_enable_inplace_f2fs()=" << rc_f2fs;

    rc_full =
        cryptfs_enable_inplace_full(crypto_blkdev, real_blkdev, size, size_already_done, tot_size,
                                    previously_encrypted_upto, set_progress_properties);
    LOG(DEBUG) << "cryptfs_enable_inplace_full()=" << rc_full;

    /* Hack for b/17898962, the following is the symptom... */
    if (rc_ext4 == ENABLE_INPLACE_ERR_DEV && rc_f2fs == ENABLE_INPLACE_ERR_DEV &&
        rc_full == ENABLE_INPLACE_ERR_DEV) {
        LOG(DEBUG) << "ENABLE_INPLACE_ERR_DEV";
        return ENABLE_INPLACE_ERR_DEV;
    }
    return rc_full;
}