//
// Copyright (C) 2017 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_generator/squashfs_filesystem.h"

#include <fcntl.h>

#include <algorithm>
#include <string>
#include <utility>

#include <base/files/file_util.h>
#include <base/logging.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_split.h>
#include <brillo/streams/file_stream.h>

#include "update_engine/common/subprocess.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_generator/deflate_utils.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/extent_utils.h"
#include "update_engine/update_metadata.pb.h"

using std::string;
using std::unique_ptr;
using std::vector;

namespace chromeos_update_engine {

namespace {

// The size of the squashfs super block.
constexpr size_t kSquashfsSuperBlockSize = 96;
constexpr uint64_t kSquashfsCompressedBit = 1 << 24;
constexpr uint32_t kSquashfsZlibCompression = 1;
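// A block size parsed from the file map may carry kSquashfsCompressedBit;
// Init() below masks the bit off to get the on-disk size and treats sizes with
// the bit clear as zlib-compressed blocks.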

bool ReadSquashfsHeader(const brillo::Blob blob,
                        SquashfsFilesystem::SquashfsHeader* header) {
  if (blob.size() < kSquashfsSuperBlockSize) {
    return false;
  }

  memcpy(&header->magic, blob.data(), 4);
  memcpy(&header->block_size, blob.data() + 12, 4);
  memcpy(&header->compression_type, blob.data() + 20, 2);
  memcpy(&header->major_version, blob.data() + 28, 2);
  return true;
}

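// 0x73717368 is the squashfs magic number (the bytes "hsqs" read as a
// little-endian 32-bit value); only major version 4 images are accepted.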
bool CheckHeader(const SquashfsFilesystem::SquashfsHeader& header) {
  return header.magic == 0x73717368 && header.major_version == 4;
}

bool GetFileMapContent(const string& sqfs_path, string* map) {
  // Create a tmp file.
  string map_file;
  TEST_AND_RETURN_FALSE(
      utils::MakeTempFile("squashfs_file_map.XXXXXX", &map_file, nullptr));
  ScopedPathUnlinker map_unlinker(map_file);

  // Run unsquashfs to get the system file map.
  // unsquashfs -m <map-file> <squashfs-file>
  vector<string> cmd = {"unsquashfs", "-m", map_file, sqfs_path};
  string stdout;
  int exit_code;
  if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout) ||
      exit_code != 0) {
    LOG(ERROR) << "Failed to run unsquashfs -m. The stdout content was: "
               << stdout;
    return false;
  }
  TEST_AND_RETURN_FALSE(utils::ReadFile(map_file, map));
  return true;
}

}  // namespace

bool SquashfsFilesystem::Init(const string& map,
                              const string& sqfs_path,
                              size_t size,
                              const SquashfsHeader& header,
                              bool extract_deflates) {
  size_ = size;

  bool is_zlib = header.compression_type == kSquashfsZlibCompression;
  if (!is_zlib) {
    LOG(WARNING) << "Filesystem is not Gzipped. Not filling deflates!";
  }
  vector<puffin::ByteExtent> zlib_blks;

  // Read the file map. For the format of the file map, see the comments for
  // |CreateFromFileMap()|.
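  // For illustration, a hypothetical map line might look like:
  //   /usr/bin/foo 4096 8251 8195
  // i.e. the file path, the byte offset of its first data block, then the size
  // of each data block (possibly tagged with kSquashfsCompressedBit).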
  auto lines = base::SplitStringPiece(map,
                                      "\n",
                                      base::WhitespaceHandling::KEEP_WHITESPACE,
                                      base::SplitResult::SPLIT_WANT_NONEMPTY);
  for (const auto& line : lines) {
    auto splits =
        base::SplitStringPiece(line,
                               " \t",
                               base::WhitespaceHandling::TRIM_WHITESPACE,
                               base::SplitResult::SPLIT_WANT_NONEMPTY);
    // A line with only a file name (and no start offset) is invalid.
    TEST_AND_RETURN_FALSE(splits.size() > 1);
    uint64_t start;
    TEST_AND_RETURN_FALSE(base::StringToUint64(splits[1], &start));
    uint64_t cur_offset = start;
    for (size_t i = 2; i < splits.size(); ++i) {
      uint64_t blk_size;
      TEST_AND_RETURN_FALSE(base::StringToUint64(splits[i], &blk_size));
      // TODO(ahassani): For puffin push it into a proper list if uncompressed.
      auto new_blk_size = blk_size & ~kSquashfsCompressedBit;
      TEST_AND_RETURN_FALSE(new_blk_size <= header.block_size);
      if (new_blk_size > 0 && !(blk_size & kSquashfsCompressedBit)) {
        // Compressed block.
        if (is_zlib && extract_deflates) {
          zlib_blks.emplace_back(cur_offset, new_blk_size);
        }
      }
      cur_offset += new_blk_size;
    }

    // If the file size is zero, do not add the file.
    if (cur_offset - start > 0) {
      File file;
      file.name = splits[0].as_string();
      file.extents = {ExtentForBytes(kBlockSize, start, cur_offset - start)};
      files_.emplace_back(file);
    }
  }

  // Sort all files by their offset in the squashfs.
  std::sort(files_.begin(), files_.end(), [](const File& a, const File& b) {
    return a.extents[0].start_block() < b.extents[0].start_block();
  });
  // If two consecutive extents overlap, resolve the overlap by dropping or
  // trimming one of the files. Here we assume every file has exactly one
  // extent; if that assumption changes, this implementation must change too.
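  // For example (hypothetical numbers): if file A occupies blocks [10, 15) and
  // file B starts at block 12 and ends past block 15, A is trimmed to
  // [10, 12); if B instead ended at or before block 15, B would be dropped.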
  for (auto first = files_.begin(), second = first + 1;
       first != files_.end() && second != files_.end();
       second = first + 1) {
    auto first_begin = first->extents[0].start_block();
    auto first_end = first_begin + first->extents[0].num_blocks();
    auto second_begin = second->extents[0].start_block();
    auto second_end = second_begin + second->extents[0].num_blocks();
    // Remove the first file if the size is zero.
    if (first_end == first_begin) {
      first = files_.erase(first);
    } else if (first_end > second_begin) {  // We found a collision.
      if (second_end <= first_end) {
        // Second file is inside the first file, remove the second file.
        second = files_.erase(second);
      } else if (first_begin == second_begin) {
        // First file is inside the second file, remove the first file.
        first = files_.erase(first);
      } else {
        // Remove overlapping extents from the first file.
        first->extents[0].set_num_blocks(second_begin - first_begin);
        ++first;
      }
    } else {
      ++first;
    }
  }

  // Find all the metadata, including the superblock, and add them to the list
  // of files.
  ExtentRanges file_extents;
  for (const auto& file : files_) {
    file_extents.AddExtents(file.extents);
  }
  vector<Extent> full = {ExtentForBytes(kBlockSize, 0, size_)};
  auto metadata_extents = FilterExtentRanges(full, file_extents);
  // For now there should be at most two extents: one for the superblock and
  // one for the metadata at the end. Just create appropriate files with
  // <metadata-i> names. We could add all these extents as one metadata file,
  // but that would violate the contiguous write optimization.
  for (size_t i = 0; i < metadata_extents.size(); i++) {
    File file;
    file.name = "<metadata-" + std::to_string(i) + ">";
    file.extents = {metadata_extents[i]};
    files_.emplace_back(file);
  }

  // Do one last sort before returning.
  std::sort(files_.begin(), files_.end(), [](const File& a, const File& b) {
    return a.extents[0].start_block() < b.extents[0].start_block();
  });

  if (is_zlib && extract_deflates) {
    // If it is in fact zlib-compressed, then the sqfs_path should be valid to
    // read its content.
    TEST_AND_RETURN_FALSE(!sqfs_path.empty());
    if (zlib_blks.empty()) {
      return true;
    }

    // Sort the zlib blocks by offset.
    std::sort(zlib_blks.begin(),
              zlib_blks.end(),
              [](const puffin::ByteExtent& a, const puffin::ByteExtent& b) {
                return a.offset < b.offset;
              });

    // Sanity check: make sure the zlib blocks do not overlap.
    auto result = std::adjacent_find(
        zlib_blks.begin(),
        zlib_blks.end(),
        [](const puffin::ByteExtent& a, const puffin::ByteExtent& b) {
          return (a.offset + a.length) > b.offset;
        });
    TEST_AND_RETURN_FALSE(result == zlib_blks.end());

    vector<puffin::BitExtent> deflates;
    TEST_AND_RETURN_FALSE(
        puffin::LocateDeflatesInZlibBlocks(sqfs_path, zlib_blks, &deflates));

    // Add deflates for each file.
    for (auto& file : files_) {
      file.deflates = deflate_utils::FindDeflates(file.extents, deflates);
    }
  }
  return true;
}

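// A hypothetical call site (not part of this file) could use the factory like:
//   auto fs = SquashfsFilesystem::CreateFromFile(image_path,
//                                                true /* extract_deflates */);
//   if (fs) {
//     std::vector<FilesystemInterface::File> files;
//     fs->GetFiles(&files);
//   }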
unique_ptr<SquashfsFilesystem> SquashfsFilesystem::CreateFromFile(
    const string& sqfs_path, bool extract_deflates) {
  if (sqfs_path.empty())
    return nullptr;

  brillo::StreamPtr sqfs_file =
      brillo::FileStream::Open(base::FilePath(sqfs_path),
                               brillo::Stream::AccessMode::READ,
                               brillo::FileStream::Disposition::OPEN_EXISTING,
                               nullptr);
  if (!sqfs_file) {
    LOG(ERROR) << "Unable to open " << sqfs_path << " for reading.";
    return nullptr;
  }

  SquashfsHeader header;
  brillo::Blob blob(kSquashfsSuperBlockSize);
  if (!sqfs_file->ReadAllBlocking(blob.data(), blob.size(), nullptr)) {
    LOG(ERROR) << "Unable to read from file: " << sqfs_path;
    return nullptr;
  }
  if (!ReadSquashfsHeader(blob, &header) || !CheckHeader(header)) {
    // This is not necessarily an error.
    return nullptr;
  }

  // Read the map file.
  string filemap;
  if (!GetFileMapContent(sqfs_path, &filemap)) {
    LOG(ERROR) << "Failed to produce squashfs map file: " << sqfs_path;
    return nullptr;
  }

  unique_ptr<SquashfsFilesystem> sqfs(new SquashfsFilesystem());
  if (!sqfs->Init(
          filemap, sqfs_path, sqfs_file->GetSize(), header, extract_deflates)) {
    LOG(ERROR) << "Failed to initialize the Squashfs file system";
    return nullptr;
  }

  return sqfs;
}

unique_ptr<SquashfsFilesystem> SquashfsFilesystem::CreateFromFileMap(
    const string& filemap, size_t size, const SquashfsHeader& header) {
  if (!CheckHeader(header)) {
    LOG(ERROR) << "Invalid Squashfs super block!";
    return nullptr;
  }

  unique_ptr<SquashfsFilesystem> sqfs(new SquashfsFilesystem());
  if (!sqfs->Init(filemap, "", size, header, false)) {
    LOG(ERROR) << "Failed to initialize the Squashfs file system using filemap";
    return nullptr;
  }
  // TODO(ahassani): Add a function that initializes the puffin related extents.
  return sqfs;
}

size_t SquashfsFilesystem::GetBlockSize() const {
  return kBlockSize;
}

size_t SquashfsFilesystem::GetBlockCount() const {
  return size_ / kBlockSize;
}

bool SquashfsFilesystem::GetFiles(vector<File>* files) const {
  files->insert(files->end(), files_.begin(), files_.end());
  return true;
}

bool SquashfsFilesystem::LoadSettings(brillo::KeyValueStore* store) const {
  // Settings are not supported in squashfs.
  LOG(ERROR) << "squashfs doesn't support LoadSettings().";
  return false;
}

bool SquashfsFilesystem::IsSquashfsImage(const brillo::Blob& blob) {
  SquashfsHeader header;
  return ReadSquashfsHeader(blob, &header) && CheckHeader(header);
}

}  // namespace chromeos_update_engine