/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fd_file.h"

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#if defined(__BIONIC__)
#include <android/fdsan.h>
#include <android/api-level.h>
#endif

#if defined(_WIN32)
#include <windows.h>
#endif

#include <limits>
#include <vector>

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/properties.h>

// Includes needed for FdFile::Copy().
#include "base/globals.h"
#ifdef __linux__
#include "base/bit_utils.h"
#include "base/mem_map.h"
#include "sys/mman.h"
#else
#include <algorithm>
#include "base/stl_util.h"
#endif
namespace unix_file {

// f2fs decompress issue.
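// Returns true on devices affected by the issue: Android builds below API level 35 whose
// ro.product.build.fingerprint starts with "samsung". On affected devices sparse files are
// avoided (see AllowSparseFiles() below).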
static bool b376814207() {
#ifdef __BIONIC__
  if (android_get_device_api_level() >= 35) {
    return false;
  }
#endif
  std::string property = android::base::GetProperty("ro.product.build.fingerprint", "");
  return property.starts_with("samsung");
}

// Used to work around kernel bugs.
bool AllowSparseFiles() {
  static bool allow = !b376814207();
  return allow;
}

#if defined(_WIN32)
// RAII wrapper for an event object to allow asynchronous I/O to correctly signal completion.
class ScopedEvent {
 public:
  ScopedEvent() {
    handle_ = CreateEventA(/*lpEventAttributes*/ nullptr,
                           /*bManualReset*/ true,
                           /*bInitialState*/ false,
                           /*lpName*/ nullptr);
  }

  ~ScopedEvent() { CloseHandle(handle_); }

  HANDLE handle() { return handle_; }

 private:
  HANDLE handle_;
  DISALLOW_COPY_AND_ASSIGN(ScopedEvent);
};

// Windows implementation of pread/pwrite. Note that these DO move the file descriptor's read/write
// position, but do so atomically.
static ssize_t pread(int fd, void* data, size_t byte_count, off64_t offset) {
  ScopedEvent event;
  if (event.handle() == INVALID_HANDLE_VALUE) {
    PLOG(ERROR) << "Could not create event handle.";
    errno = EIO;
    return static_cast<ssize_t>(-1);
  }

  auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
  DWORD bytes_read = 0;
  OVERLAPPED overlapped = {};
  overlapped.Offset = static_cast<DWORD>(offset);
  overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
  overlapped.hEvent = event.handle();
  if (!ReadFile(handle, data, static_cast<DWORD>(byte_count), &bytes_read, &overlapped)) {
    // If the read failed with an error other than ERROR_IO_PENDING, return an error.
    // ERROR_IO_PENDING signals that the read was begun asynchronously.
    // Block until the asynchronous operation has finished or failed, and return
    // the result accordingly.
    if (::GetLastError() != ERROR_IO_PENDING ||
        !::GetOverlappedResult(handle, &overlapped, &bytes_read, TRUE)) {
      // In case someone tries to read errno (since this is masquerading as a POSIX call).
      errno = EIO;
      return static_cast<ssize_t>(-1);
    }
  }
  return static_cast<ssize_t>(bytes_read);
}

static ssize_t pwrite(int fd, const void* buf, size_t count, off64_t offset) {
  ScopedEvent event;
  if (event.handle() == INVALID_HANDLE_VALUE) {
    PLOG(ERROR) << "Could not create event handle.";
    errno = EIO;
    return static_cast<ssize_t>(-1);
  }

  auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
  DWORD bytes_written = 0;
  OVERLAPPED overlapped = {};
  overlapped.Offset = static_cast<DWORD>(offset);
  overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
  overlapped.hEvent = event.handle();
  if (!::WriteFile(handle, buf, count, &bytes_written, &overlapped)) {
    // If the write failed with an error other than ERROR_IO_PENDING, return an error.
    // ERROR_IO_PENDING signals that the write was begun asynchronously.
    // Block until the asynchronous operation has finished or failed, and return
    // the result accordingly.
    if (::GetLastError() != ERROR_IO_PENDING ||
        !::GetOverlappedResult(handle, &overlapped, &bytes_written, TRUE)) {
      // In case someone tries to read errno (since this is masquerading as a POSIX call).
      errno = EIO;
      return static_cast<ssize_t>(-1);
    }
  }
  return static_cast<ssize_t>(bytes_written);
}

static int fsync(int fd) {
  auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
  if (handle != INVALID_HANDLE_VALUE && ::FlushFileBuffers(handle)) {
    return 0;
  }
  errno = EINVAL;
  return -1;
}
#endif

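// fdsan (bionic's file descriptor sanitizer) lets a file descriptor be tagged with an owner so
// that a close by unrelated code is flagged. FdFile tags every descriptor it owns with a value
// derived from the object's address; the tag follows the descriptor across moves and Reset(), and
// is relinquished again in Release() and Close().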
#if defined(__BIONIC__)
static uint64_t GetFdFileOwnerTag(FdFile* fd_file) {
  return android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_ART_FDFILE,
                                        reinterpret_cast<uint64_t>(fd_file));
}
#endif

FdFile::FdFile(int fd, bool check_usage)
    : FdFile(fd, std::string(), check_usage) {}

FdFile::FdFile(int fd, const std::string& path, bool check_usage)
    : FdFile(fd, path, check_usage, false) {}

FdFile::FdFile(int fd, const std::string& path, bool check_usage,
               bool read_only_mode)
    : guard_state_(check_usage ? GuardState::kBase : GuardState::kNoCheck),
      fd_(fd),
      file_path_(path),
      read_only_mode_(read_only_mode) {
#if defined(__BIONIC__)
  if (fd >= 0) {
    android_fdsan_exchange_owner_tag(fd, 0, GetFdFileOwnerTag(this));
  }
#endif
}

FdFile::FdFile(const std::string& path, int flags, mode_t mode,
               bool check_usage) {
  Open(path, flags, mode);
  if (!check_usage || !IsOpened()) {
    guard_state_ = GuardState::kNoCheck;
  }
}

void FdFile::Destroy() {
  if (kCheckSafeUsage && (guard_state_ < GuardState::kNoCheck)) {
    if (guard_state_ < GuardState::kFlushed) {
      LOG(ERROR) << "File " << file_path_ << " wasn't explicitly flushed before destruction.";
    }
    if (guard_state_ < GuardState::kClosed) {
      LOG(ERROR) << "File " << file_path_ << " wasn't explicitly closed before destruction.";
    }
    DCHECK_GE(guard_state_, GuardState::kClosed);
  }
  if (fd_ != kInvalidFd) {
    if (Close() != 0) {
      PLOG(WARNING) << "Failed to close file with fd=" << fd_ << " path=" << file_path_;
    }
  }
}

FdFile::FdFile(FdFile&& other) noexcept
    : guard_state_(other.guard_state_),
      fd_(other.fd_),
      file_path_(std::move(other.file_path_)),
      read_only_mode_(other.read_only_mode_) {
#if defined(__BIONIC__)
  if (fd_ >= 0) {
    android_fdsan_exchange_owner_tag(fd_, GetFdFileOwnerTag(&other), GetFdFileOwnerTag(this));
  }
#endif
  other.guard_state_ = GuardState::kClosed;
  other.fd_ = kInvalidFd;
}

FdFile& FdFile::operator=(FdFile&& other) noexcept {
  if (this == &other) {
    return *this;
  }

  if (this->fd_ != other.fd_) {
    Destroy();  // Free old state.
  }

  guard_state_ = other.guard_state_;
  fd_ = other.fd_;
  file_path_ = std::move(other.file_path_);
  read_only_mode_ = other.read_only_mode_;

#if defined(__BIONIC__)
  if (fd_ >= 0) {
    android_fdsan_exchange_owner_tag(fd_, GetFdFileOwnerTag(&other), GetFdFileOwnerTag(this));
  }
#endif
  other.guard_state_ = GuardState::kClosed;
  other.fd_ = kInvalidFd;
  return *this;
}

FdFile::~FdFile() {
  Destroy();
}

int FdFile::Release() {
  int tmp_fd = fd_;
  fd_ = kInvalidFd;
  guard_state_ = GuardState::kNoCheck;
#if defined(__BIONIC__)
  if (tmp_fd >= 0) {
    android_fdsan_exchange_owner_tag(tmp_fd, GetFdFileOwnerTag(this), 0);
  }
#endif
  return tmp_fd;
}

void FdFile::Reset(int fd, bool check_usage) {
  CHECK_NE(fd, fd_);

  if (fd_ != kInvalidFd) {
    Destroy();
  }
  fd_ = fd;

#if defined(__BIONIC__)
  if (fd_ >= 0) {
    android_fdsan_exchange_owner_tag(fd_, 0, GetFdFileOwnerTag(this));
  }
#endif

  if (check_usage) {
    guard_state_ = fd == kInvalidFd ? GuardState::kNoCheck : GuardState::kBase;
  } else {
    guard_state_ = GuardState::kNoCheck;
  }
}

void FdFile::moveTo(GuardState target, GuardState warn_threshold, const char* warning) {
  if (kCheckSafeUsage) {
    if (guard_state_ < GuardState::kNoCheck) {
      if (warn_threshold < GuardState::kNoCheck && guard_state_ >= warn_threshold) {
        LOG(ERROR) << warning;
      }
      guard_state_ = target;
    }
  }
}

void FdFile::moveUp(GuardState target, const char* warning) {
  if (kCheckSafeUsage) {
    if (guard_state_ < GuardState::kNoCheck) {
      if (guard_state_ < target) {
        guard_state_ = target;
      } else if (target < guard_state_) {
        LOG(ERROR) << warning;
      }
    }
  }
}
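
// Illustrative sketch of the guarded lifecycle that moveTo()/moveUp() and Destroy() enforce
// (not code from this file; the path and variables are hypothetical):
//
//   FdFile file("/data/local/tmp/example.bin", O_RDWR | O_CREAT, 0644, /*check_usage=*/true);
//   file.WriteFully(data, size);   // guard stays at GuardState::kBase
//   file.Flush();                  // guard moves up to GuardState::kFlushed
//   file.Close();                  // guard moves up to GuardState::kClosed
//
// Destroying a checked, writable FdFile that was never flushed and closed triggers the
// LOG(ERROR) messages in Destroy().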

bool FdFile::Open(const std::string& path, int flags) {
  return Open(path, flags, 0640);
}

bool FdFile::Open(const std::string& path, int flags, mode_t mode) {
  static_assert(O_RDONLY == 0, "Readonly flag has unexpected value.");
  DCHECK_EQ(fd_, kInvalidFd) << path;
  read_only_mode_ = ((flags & O_ACCMODE) == O_RDONLY);
  fd_ = TEMP_FAILURE_RETRY(open(path.c_str(), flags, mode));
  if (fd_ == kInvalidFd) {
    return false;
  }

#if defined(__BIONIC__)
  android_fdsan_exchange_owner_tag(fd_, 0, GetFdFileOwnerTag(this));
#endif

  file_path_ = path;
  if (kCheckSafeUsage && (flags & (O_RDWR | O_CREAT | O_WRONLY)) != 0) {
    // Start in the base state (not flushed, not closed).
    guard_state_ = GuardState::kBase;
  } else {
    // We are not concerned with read-only files. In that case, proper flushing and closing is
    // not important.
    guard_state_ = GuardState::kNoCheck;
  }
  return true;
}

int FdFile::Close() {
#if defined(__BIONIC__)
  int result = android_fdsan_close_with_tag(fd_, GetFdFileOwnerTag(this));
#else
  int result = close(fd_);
#endif

  // Test here, so the file is closed and not leaked.
  if (kCheckSafeUsage) {
    DCHECK_GE(guard_state_, GuardState::kFlushed) << "File " << file_path_
        << " has not been flushed before closing.";
    moveUp(GuardState::kClosed, nullptr);
  }

#if defined(__linux__)
  // close always succeeds on linux, even if failure is reported.
  UNUSED(result);
#else
  if (result == -1) {
    return -errno;
  }
#endif

  fd_ = kInvalidFd;
  file_path_ = "";
  return 0;
}

int FdFile::Flush(bool flush_metadata) {
  DCHECK(flush_metadata || !read_only_mode_);

#ifdef __linux__
  int rc;
  if (flush_metadata) {
    rc = TEMP_FAILURE_RETRY(fsync(fd_));
  } else {
    rc = TEMP_FAILURE_RETRY(fdatasync(fd_));
  }
#else
  int rc = TEMP_FAILURE_RETRY(fsync(fd_));
#endif

  moveUp(GuardState::kFlushed, "Flushing closed file.");
  if (rc == 0) {
    return 0;
  }

  // Don't report failure if we just tried to flush a pipe or socket.
  return errno == EINVAL ? 0 : -errno;
}

int64_t FdFile::Read(char* buf, int64_t byte_count, int64_t offset) const {
#ifdef __linux__
  int rc = TEMP_FAILURE_RETRY(pread64(fd_, buf, byte_count, offset));
#else
  int rc = TEMP_FAILURE_RETRY(pread(fd_, buf, byte_count, offset));
#endif
  return (rc == -1) ? -errno : rc;
}

int FdFile::SetLength(int64_t new_length) {
  DCHECK(!read_only_mode_);
#ifdef __linux__
  int rc = TEMP_FAILURE_RETRY(ftruncate64(fd_, new_length));
#else
  int rc = TEMP_FAILURE_RETRY(ftruncate(fd_, new_length));
#endif
  moveTo(GuardState::kBase, GuardState::kClosed, "Truncating closed file.");
  return (rc == -1) ? -errno : rc;
}

int64_t FdFile::GetLength() const {
  struct stat s;
  int rc = TEMP_FAILURE_RETRY(fstat(fd_, &s));
  return (rc == -1) ? -errno : s.st_size;
}

int64_t FdFile::Write(const char* buf, int64_t byte_count, int64_t offset) {
  DCHECK(!read_only_mode_);
#ifdef __linux__
  int rc = TEMP_FAILURE_RETRY(pwrite64(fd_, buf, byte_count, offset));
#else
  int rc = TEMP_FAILURE_RETRY(pwrite(fd_, buf, byte_count, offset));
#endif
  moveTo(GuardState::kBase, GuardState::kClosed, "Writing into closed file.");
  return (rc == -1) ? -errno : rc;
}

int FdFile::Fd() const {
  return fd_;
}

bool FdFile::ReadOnlyMode() const {
  return read_only_mode_;
}

bool FdFile::CheckUsage() const {
  return guard_state_ != GuardState::kNoCheck;
}

bool FdFile::IsOpened() const {
  return FdFile::IsOpenFd(fd_);
}

static ssize_t ReadIgnoreOffset(int fd, void* buf, size_t count, off_t offset) {
  DCHECK_EQ(offset, 0);
  return read(fd, buf, count);
}

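// Reads exactly 'byte_count' bytes into 'buffer', looping over short reads. 'read_func' is either
// pread (used by PreadFully) or ReadIgnoreOffset (used by ReadFully), so the same loop serves both
// the positioned and the sequential case. Returns false on end of file or error.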
template <ssize_t (*read_func)(int, void*, size_t, off_t)>
static bool ReadFullyGeneric(int fd, void* buffer, size_t byte_count, size_t offset) {
  char* ptr = static_cast<char*>(buffer);
  while (byte_count > 0) {
    ssize_t bytes_read = TEMP_FAILURE_RETRY(read_func(fd, ptr, byte_count, offset));
    if (bytes_read <= 0) {
      // 0: end of file
      // -1: error
      return false;
    }
    byte_count -= bytes_read;  // Reduce the number of remaining bytes.
    ptr += bytes_read;  // Move the buffer forward.
    offset += static_cast<size_t>(bytes_read);  // Move the offset forward.
  }
  return true;
}

bool FdFile::ReadFully(void* buffer, size_t byte_count) {
  return ReadFullyGeneric<ReadIgnoreOffset>(fd_, buffer, byte_count, 0);
}

bool FdFile::PreadFully(void* buffer, size_t byte_count, size_t offset) {
  return ReadFullyGeneric<pread>(fd_, buffer, byte_count, offset);
}

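// Writes exactly 'byte_count' bytes from 'buffer', looping over short writes. When kUseOffset is
// true the positioned pwrite() path is used (PwriteFully); otherwise plain write() advances the
// file offset (WriteFully).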
template <bool kUseOffset>
bool FdFile::WriteFullyGeneric(const void* buffer, size_t byte_count, size_t offset) {
  DCHECK(!read_only_mode_);
  moveTo(GuardState::kBase, GuardState::kClosed, "Writing into closed file.");
  DCHECK(kUseOffset || offset == 0u);
  const char* ptr = static_cast<const char*>(buffer);
  while (byte_count > 0) {
    ssize_t bytes_written = kUseOffset
        ? TEMP_FAILURE_RETRY(pwrite(fd_, ptr, byte_count, offset))
        : TEMP_FAILURE_RETRY(write(fd_, ptr, byte_count));
    if (bytes_written == -1) {
      return false;
    }
    byte_count -= bytes_written;  // Reduce the number of remaining bytes.
    ptr += bytes_written;  // Move the buffer forward.
    offset += static_cast<size_t>(bytes_written);
  }
  return true;
}

bool FdFile::PwriteFully(const void* buffer, size_t byte_count, size_t offset) {
  return WriteFullyGeneric<true>(buffer, byte_count, offset);
}

bool FdFile::WriteFully(const void* buffer, size_t byte_count) {
  return WriteFullyGeneric<false>(buffer, byte_count, 0u);
}

bool FdFile::Rename(const std::string& new_path) {
  if (kCheckSafeUsage) {
    // Filesystems that use delayed allocation (e.g., ext4) may journal a rename before a data
    // update is written to disk. Therefore on system crash, the data update may not persist.
    // Guard against this by ensuring the file has been flushed prior to rename.
    if (guard_state_ < GuardState::kFlushed) {
      LOG(ERROR) << "File " << file_path_ << " has not been flushed before renaming.";
    }
    DCHECK_GE(guard_state_, GuardState::kFlushed);
  }

  if (!FilePathMatchesFd()) {
    LOG(ERROR) << "Failed rename because the file descriptor is not backed by the expected file "
               << "path: " << file_path_;
    return false;
  }

  std::string old_path = file_path_;
  int rc = std::rename(old_path.c_str(), new_path.c_str());
  if (rc != 0) {
    LOG(ERROR) << "Rename from '" << old_path << "' to '" << new_path << "' failed.";
    return false;
  }
  file_path_ = new_path;

  // Rename modifies the directory entries mapped within the parent directory file descriptor(s),
  // rather than the file, so flushing the file will not persist the change to disk. Therefore, we
  // flush the parent directory file descriptor(s).
  std::string old_dir = android::base::Dirname(old_path);
  std::string new_dir = android::base::Dirname(new_path);
  std::vector<std::string> sync_dirs = {new_dir};
  if (new_dir != old_dir) {
    sync_dirs.emplace_back(old_dir);
  }
  for (auto& dirname : sync_dirs) {
    FdFile dir = FdFile(dirname, O_RDONLY, /*check_usage=*/false);
    rc = dir.Flush(/*flush_metadata=*/true);
    if (rc != 0) {
      LOG(ERROR) << "Flushing directory '" << dirname << "' during rename failed.";
      return false;
    }
    rc = dir.Close();
    if (rc != 0) {
      LOG(ERROR) << "Closing directory '" << dirname << "' during rename failed.";
      return false;
    }
  }
  return true;
}

#ifdef __linux__
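// Writes 'size' bytes from 'data' at the current file offset, except that a chunk consisting
// entirely of zero bytes (compared against the caller-provided 'zeroes' scratch buffer) is skipped
// by advancing the offset instead, when AllowSparseFiles() permits. The skipped range is left as a
// hole on filesystems that support sparse files.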
bool FdFile::SparseWrite(const uint8_t* data,
                         size_t size,
                         const std::vector<uint8_t>& zeroes) {
  DCHECK_GE(zeroes.size(), size);
  if (memcmp(zeroes.data(), data, size) == 0 && AllowSparseFiles()) {
    // These bytes are all zeroes; skip them by advancing the file offset with lseek(SEEK_CUR)
    // instead of writing. The skipped range becomes a hole in the output file on filesystems that
    // support sparse files.
    if (TEMP_FAILURE_RETRY(lseek(Fd(), size, SEEK_CUR)) < 0) {
      return false;
    }
  } else {
    if (!WriteFully(data, size)) {
      return false;
    }
  }
  return true;
}

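// Copies ['off', 'off' + 'size') from 'input_file' to the current offset of this file by mapping
// the input and forwarding it to SparseWrite() one filesystem block at a time, so that all-zero
// blocks in the input become holes in the output. Leaves the input file's offset at 'off' + 'size'.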
bool FdFile::UserspaceSparseCopy(const FdFile* input_file,
                                 off_t off,
                                 size_t size,
                                 size_t fs_blocksize) {
  // Map the input file. We will begin the copy 'off' bytes into the map.
  art::MemMap::Init();
  std::string error_msg;
  art::MemMap mmap = art::MemMap::MapFile(off + size,
                                          PROT_READ,
                                          MAP_PRIVATE,
                                          input_file->Fd(),
                                          /*start=*/0,
                                          /*low_4gb=*/false,
                                          input_file->GetPath().c_str(),
                                          &error_msg);
  if (!mmap.IsValid()) {
    LOG(ERROR) << "Failed to mmap " << input_file->GetPath() << " for copying: " << error_msg;
    return false;
  }

  std::vector<uint8_t> zeroes(/*n=*/fs_blocksize, /*val=*/0);

  // Iterate through each fs_blocksize of the copy region.
  uint8_t* input_ptr = mmap.Begin() + off;
  for (; (input_ptr + fs_blocksize) <= mmap.End(); input_ptr += fs_blocksize) {
    if (!SparseWrite(input_ptr, fs_blocksize, zeroes)) {
      return false;
    }
  }
  // Finish copying any remaining bytes.
  const size_t remaining_bytes = size % fs_blocksize;
  if (remaining_bytes > 0) {
    if (!SparseWrite(input_ptr, remaining_bytes, zeroes)) {
      return false;
    }
  }
  // Update the input file FD offset to the end of the copy region.
  off_t input_offset = TEMP_FAILURE_RETRY(lseek(input_file->Fd(), off + size, SEEK_SET));
  if (input_offset != (off + static_cast<off_t>(size))) {
    return false;
  }
  return true;
}
#endif

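// Copies 'size' bytes starting at 'offset' from 'input_file' to the current offset of this file.
// On Linux the copy preserves holes (see UserspaceSparseCopy() above); elsewhere it falls back to
// a plain buffered read/write loop.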
bool FdFile::Copy(FdFile* input_file, int64_t offset, int64_t size) {
  DCHECK(!read_only_mode_);
  off_t off = static_cast<off_t>(offset);
  off_t sz = static_cast<off_t>(size);
  if (offset < 0 || static_cast<int64_t>(off) != offset ||
      size < 0 || static_cast<int64_t>(sz) != size ||
      sz > std::numeric_limits<off_t>::max() - off) {
    errno = EINVAL;
    return false;
  }
  if (size == 0) {
    return true;
  }

#ifdef __linux__
  off_t current_offset = TEMP_FAILURE_RETRY(lseek(Fd(), 0, SEEK_CUR));
  if (GetLength() > current_offset) {
    // Copying to an existing region of the destination file is not supported. The current
    // implementation would incorrectly preserve all existing data regions within the output file
    // which match the locations of holes within the input file.
    LOG(ERROR) << "Cannot copy into an existing region of the destination file.";
    errno = EINVAL;
    return false;
  }
  struct stat output_stat;
  if (TEMP_FAILURE_RETRY(fstat(Fd(), &output_stat)) < 0) {
    return false;
  }
  const off_t fs_blocksize = output_stat.st_blksize;
  if (!art::IsAlignedParam(current_offset, fs_blocksize)) {
    // The input region is copied (skipped or written) in chunks of the output file's blocksize.
    // For those chunks to be represented as holes or data, they should land as aligned blocks in
    // the output file. Therefore, here we enforce that the current output offset is aligned.
    LOG(ERROR) << "Copy destination FD offset (" << current_offset << ") must be aligned with"
               << " blocksize (" << fs_blocksize << ").";
    errno = EINVAL;
    return false;
  }
  const size_t end_length = GetLength() + sz;
  if (!UserspaceSparseCopy(input_file, off, sz, fs_blocksize)) {
    return false;
  }
  // In case the last blocks of the input file were a hole, fix the length to what would have been
  // set if they had been data.
  if (SetLength(end_length) != 0) {
    return false;
  }
#else
  if (lseek(input_file->Fd(), off, SEEK_SET) != off) {
    return false;
  }
  constexpr size_t kMaxBufferSize = 16 * ::art::KB;
  const size_t buffer_size = std::min<uint64_t>(size, kMaxBufferSize);
  art::UniqueCPtr<void> buffer(malloc(buffer_size));
  if (buffer == nullptr) {
    errno = ENOMEM;
    return false;
  }
  while (size != 0) {
    size_t chunk_size = std::min<uint64_t>(buffer_size, size);
    if (!input_file->ReadFully(buffer.get(), chunk_size) ||
        !WriteFully(buffer.get(), chunk_size)) {
      return false;
    }
    size -= chunk_size;
  }
#endif
  return true;
}

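// Returns true if 'file_path_' still names the file that 'fd_' refers to, i.e. a file with the
// same device and inode numbers; returns false if the path is empty or the file has since been
// unlinked or replaced.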
684 
FilePathMatchesFd()685 bool FdFile::FilePathMatchesFd() {
686   if (file_path_.empty()) {
687     return false;
688   }
689   // Try to figure out whether file_path_ is still referring to the one on disk.
690   bool is_current = false;
691   struct stat this_stat, current_stat;
692   int cur_fd = TEMP_FAILURE_RETRY(open(file_path_.c_str(), O_RDONLY | O_CLOEXEC));
693   if (cur_fd > 0) {
694     // File still exists.
695     if (fstat(fd_, &this_stat) == 0 && fstat(cur_fd, &current_stat) == 0) {
696       is_current = (this_stat.st_dev == current_stat.st_dev) &&
697                    (this_stat.st_ino == current_stat.st_ino);
698     }
699     close(cur_fd);
700   }
701   return is_current;
702 }

bool FdFile::Unlink() {
  bool is_current = FilePathMatchesFd();
  if (is_current) {
    unlink(file_path_.c_str());
  }

  return is_current;
}

bool FdFile::Erase(bool unlink) {
  DCHECK(!read_only_mode_);

  bool ret_result = true;
  if (unlink) {
    ret_result = Unlink();
  }

  int result;
  result = SetLength(0);
  result = Flush();
  result = Close();
  // Ignore the errors.
  (void) result;

  return ret_result;
}

int FdFile::FlushCloseOrErase() {
  DCHECK(!read_only_mode_);
  int flush_result = Flush();
  if (flush_result != 0) {
    LOG(ERROR) << "CloseOrErase failed while flushing a file.";
    Erase();
    return flush_result;
  }
  int close_result = Close();
  if (close_result != 0) {
    LOG(ERROR) << "CloseOrErase failed while closing a file.";
    Erase();
    return close_result;
  }
  return 0;
}

int FdFile::FlushClose() {
  DCHECK(!read_only_mode_);
  int flush_result = Flush();
  if (flush_result != 0) {
    LOG(ERROR) << "FlushClose failed while flushing a file.";
  }
  int close_result = Close();
  if (close_result != 0) {
    LOG(ERROR) << "FlushClose failed while closing a file.";
  }
  return (flush_result != 0) ? flush_result : close_result;
}

void FdFile::MarkUnchecked() {
  guard_state_ = GuardState::kNoCheck;
}

bool FdFile::ClearContent() {
  DCHECK(!read_only_mode_);
  if (SetLength(0) < 0) {
    PLOG(ERROR) << "Failed to reset the length";
    return false;
  }
  return ResetOffset();
}

bool FdFile::ResetOffset() {
  DCHECK(!read_only_mode_);
  off_t rc = TEMP_FAILURE_RETRY(lseek(fd_, 0, SEEK_SET));
  if (rc == static_cast<off_t>(-1)) {
    PLOG(ERROR) << "Failed to reset the offset";
    return false;
  }
  return true;
}

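// memcmp-style comparison of the two files' contents: returns 0 if they are identical, a negative
// value if this file compares lower and a positive value otherwise. Files of different lengths are
// ordered by length; a read failure is reported as an inequality.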
int FdFile::Compare(FdFile* other) {
  int64_t length = GetLength();
  int64_t length2 = other->GetLength();
  if (length != length2) {
    return length < length2 ? -1 : 1;
  }
  static const size_t kBufferSize = 4096;
  std::unique_ptr<uint8_t[]> buffer1(new uint8_t[kBufferSize]);
  std::unique_ptr<uint8_t[]> buffer2(new uint8_t[kBufferSize]);
  size_t offset = 0;
  while (length > 0) {
    size_t len = std::min(kBufferSize, static_cast<size_t>(length));
    if (!PreadFully(&buffer1[0], len, offset)) {
      return -1;
    }
    if (!other->PreadFully(&buffer2[0], len, offset)) {
      return 1;
    }
    int result = memcmp(&buffer1[0], &buffer2[0], len);
    if (result != 0) {
      return result;
    }
    length -= len;
    offset += len;
  }
  return 0;
}

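// Returns true if 'fd' looks like an open file descriptor. On POSIX systems this probes the
// descriptor with fcntl(F_GETFD) and preserves errno; the Windows toolchain lacks F_GETFD, so any
// fd other than kInvalidFd is treated as open there.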
bool FdFile::IsOpenFd(int fd) {
  if (fd == kInvalidFd) {
    return false;
  }
  #ifdef _WIN32  // Windows toolchain does not support F_GETFD.
    return true;
  #else
    int saved_errno = errno;
    bool is_open = (fcntl(fd, F_GETFD) != -1);
    errno = saved_errno;
    return is_open;
  #endif
}

}  // namespace unix_file