1 use std::collections::{BTreeMap, BTreeSet};
2 use std::fs::{copy, File};
3 use std::io::{BufRead, BufReader, Read, Write};
4 use std::path::{Path, PathBuf};
5
6 use anyhow::{anyhow, Context, Result};
7 use serde::{Deserialize, Serialize};
8 use sha2::{Digest, Sha256};
9
/// JSON serde struct.
///
/// One entry in a PATCHES.json file, describing a single patch file and the
/// platforms/version window it applies to.
// FIXME(b/221489531): Remove when we clear out start_version and
// end_version.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PatchDictSchema {
    /// [deprecated(since = "1.1", note = "Use version_range")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub end_version: Option<u64>,
    // Free-form metadata; a "title" entry is used by the Display impl below.
    pub metadata: Option<BTreeMap<String, serde_json::Value>>,
    // Platforms this patch applies to (e.g. "android", "chromiumos").
    // Empty means no platform restriction is recorded; see
    // `filter_patches_by_platform` for how that is interpreted.
    #[serde(default, skip_serializing_if = "BTreeSet::is_empty")]
    pub platforms: BTreeSet<String>,
    // Path to the patch file, relative to the PATCHES.json directory
    // (`PatchCollection::workdir`).
    pub rel_patch_path: String,
    /// [deprecated(since = "1.1", note = "Use version_range")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub start_version: Option<u64>,
    // Replacement for start_version/end_version; takes precedence when set
    // (see get_start_version/get_end_version).
    pub version_range: Option<VersionRange>,
}
27
/// Version interval during which a patch applies.
///
/// `None` on either end means unbounded on that side. NOTE(review): the
/// `from`/`until` naming suggests a half-open range [from, until), but this
/// file never compares against versions — confirm against the consumers.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct VersionRange {
    pub from: Option<u64>,
    pub until: Option<u64>,
}
33
34 // FIXME(b/221489531): Remove when we clear out start_version and
35 // end_version.
36 impl PatchDictSchema {
get_start_version(&self) -> Option<u64>37 pub fn get_start_version(&self) -> Option<u64> {
38 self.version_range
39 .map(|x| x.from)
40 .unwrap_or(self.start_version)
41 }
42
get_end_version(&self) -> Option<u64>43 pub fn get_end_version(&self) -> Option<u64> {
44 self.version_range
45 .map(|x| x.until)
46 .unwrap_or(self.end_version)
47 }
48 }
49
/// Struct to keep track of patches and their relative paths.
#[derive(Debug, Clone)]
pub struct PatchCollection {
    // Parsed PATCHES.json entries.
    pub patches: Vec<PatchDictSchema>,
    // Directory that each patch's `rel_patch_path` is resolved against
    // (the directory containing the PATCHES.json file).
    pub workdir: PathBuf,
}
56
57 impl PatchCollection {
58 /// Create a `PatchCollection` from a PATCHES.
parse_from_file(json_file: &Path) -> Result<Self>59 pub fn parse_from_file(json_file: &Path) -> Result<Self> {
60 Ok(Self {
61 patches: serde_json::from_reader(File::open(json_file)?)?,
62 workdir: json_file
63 .parent()
64 .ok_or_else(|| anyhow!("failed to get json_file parent"))?
65 .to_path_buf(),
66 })
67 }
68
69 /// Create a `PatchCollection` from a string literal and a workdir.
parse_from_str(workdir: PathBuf, contents: &str) -> Result<Self>70 pub fn parse_from_str(workdir: PathBuf, contents: &str) -> Result<Self> {
71 Ok(Self {
72 patches: serde_json::from_str(contents).context("parsing from str")?,
73 workdir,
74 })
75 }
76
77 /// Copy this collection with patches filtered by given criterion.
filter_patches(&self, f: impl FnMut(&PatchDictSchema) -> bool) -> Self78 pub fn filter_patches(&self, f: impl FnMut(&PatchDictSchema) -> bool) -> Self {
79 Self {
80 patches: self.patches.iter().cloned().filter(f).collect(),
81 workdir: self.workdir.clone(),
82 }
83 }
84
85 /// Map over the patches.
map_patches(&self, f: impl FnMut(&PatchDictSchema) -> PatchDictSchema) -> Self86 pub fn map_patches(&self, f: impl FnMut(&PatchDictSchema) -> PatchDictSchema) -> Self {
87 Self {
88 patches: self.patches.iter().map(f).collect(),
89 workdir: self.workdir.clone(),
90 }
91 }
92
93 /// Return true if the collection is tracking any patches.
is_empty(&self) -> bool94 pub fn is_empty(&self) -> bool {
95 self.patches.is_empty()
96 }
97
98 /// Compute the set-set subtraction, returning a new `PatchCollection` which
99 /// keeps the minuend's workdir.
subtract(&self, subtrahend: &Self) -> Result<Self>100 pub fn subtract(&self, subtrahend: &Self) -> Result<Self> {
101 let mut new_patches = Vec::new();
102 // This is O(n^2) when it could be much faster, but n is always going to be less
103 // than 1k and speed is not important here.
104 for our_patch in &self.patches {
105 let found_in_sub = subtrahend.patches.iter().any(|sub_patch| {
106 let hash1 = subtrahend
107 .hash_from_rel_patch(sub_patch)
108 .expect("getting hash from subtrahend patch");
109 let hash2 = self
110 .hash_from_rel_patch(our_patch)
111 .expect("getting hash from our patch");
112 hash1 == hash2
113 });
114 if !found_in_sub {
115 new_patches.push(our_patch.clone());
116 }
117 }
118 Ok(Self {
119 patches: new_patches,
120 workdir: self.workdir.clone(),
121 })
122 }
123
union(&self, other: &Self) -> Result<Self>124 pub fn union(&self, other: &Self) -> Result<Self> {
125 self.union_helper(
126 other,
127 |p| self.hash_from_rel_patch(p),
128 |p| other.hash_from_rel_patch(p),
129 )
130 }
131
union_helper( &self, other: &Self, our_hash_f: impl Fn(&PatchDictSchema) -> Result<String>, their_hash_f: impl Fn(&PatchDictSchema) -> Result<String>, ) -> Result<Self>132 fn union_helper(
133 &self,
134 other: &Self,
135 our_hash_f: impl Fn(&PatchDictSchema) -> Result<String>,
136 their_hash_f: impl Fn(&PatchDictSchema) -> Result<String>,
137 ) -> Result<Self> {
138 // 1. For all our patches:
139 // a. If there exists a matching patch hash from `other`:
140 // i. Create a new patch with merged platform info,
141 // ii. add the new patch to our new collection.
142 // iii. Mark the other patch as "merged"
143 // b. Otherwise, copy our patch to the new collection
144 // 2. For all unmerged patches from the `other`
145 // a. Copy their patch into the new collection
146 let mut combined_patches = Vec::new();
147 let mut other_merged = vec![false; other.patches.len()];
148
149 // 1.
150 for p in &self.patches {
151 let our_hash = our_hash_f(p)?;
152 let mut found = false;
153 // a.
154 for (idx, merged) in other_merged.iter_mut().enumerate() {
155 if !*merged {
156 let other_p = &other.patches[idx];
157 let their_hash = their_hash_f(other_p)?;
158 if our_hash == their_hash {
159 // i.
160 let new_platforms =
161 p.platforms.union(&other_p.platforms).cloned().collect();
162 // ii.
163 combined_patches.push(PatchDictSchema {
164 rel_patch_path: p.rel_patch_path.clone(),
165 start_version: p.start_version,
166 end_version: p.end_version,
167 platforms: new_platforms,
168 metadata: p.metadata.clone(),
169 version_range: p.version_range,
170 });
171 // iii.
172 *merged = true;
173 found = true;
174 break;
175 }
176 }
177 }
178 // b.
179 if !found {
180 combined_patches.push(p.clone());
181 }
182 }
183 // 2.
184 // Add any remaining, other-only patches.
185 for (idx, merged) in other_merged.iter().enumerate() {
186 if !*merged {
187 combined_patches.push(other.patches[idx].clone());
188 }
189 }
190
191 Ok(Self {
192 workdir: self.workdir.clone(),
193 patches: combined_patches,
194 })
195 }
196
197 /// Copy all patches from this collection into another existing collection, and write that
198 /// to the existing collection's file.
transpose_write(&self, existing_collection: &mut Self) -> Result<()>199 pub fn transpose_write(&self, existing_collection: &mut Self) -> Result<()> {
200 for p in &self.patches {
201 let original_file_path = self.workdir.join(&p.rel_patch_path);
202 let copy_file_path = existing_collection.workdir.join(&p.rel_patch_path);
203 copy_create_parents(&original_file_path, ©_file_path)?;
204 existing_collection.patches.push(p.clone());
205 }
206 existing_collection.write_patches_json("PATCHES.json")
207 }
208
209 /// Write out the patch collection contents to a PATCHES.json file.
write_patches_json(&self, filename: &str) -> Result<()>210 fn write_patches_json(&self, filename: &str) -> Result<()> {
211 let write_path = self.workdir.join(filename);
212 let mut new_patches_file = File::create(&write_path)
213 .with_context(|| format!("writing to {}", write_path.display()))?;
214 new_patches_file.write_all(self.serialize_patches()?.as_bytes())?;
215 Ok(())
216 }
217
serialize_patches(&self) -> Result<String>218 pub fn serialize_patches(&self) -> Result<String> {
219 let mut serialization_buffer = Vec::<u8>::new();
220 // Four spaces to indent json serialization.
221 let mut serializer = serde_json::Serializer::with_formatter(
222 &mut serialization_buffer,
223 serde_json::ser::PrettyFormatter::with_indent(b" "),
224 );
225 self.patches
226 .serialize(&mut serializer)
227 .context("serializing patches to JSON")?;
228 // Append a newline at the end if not present. This is necessary to get
229 // past some pre-upload hooks.
230 if serialization_buffer.last() != Some(&b'\n') {
231 serialization_buffer.push(b'\n');
232 }
233 Ok(std::str::from_utf8(&serialization_buffer)?.to_string())
234 }
235
236 /// Return whether a given patch actually exists on the file system.
patch_exists(&self, patch: &PatchDictSchema) -> bool237 pub fn patch_exists(&self, patch: &PatchDictSchema) -> bool {
238 self.workdir.join(&patch.rel_patch_path).exists()
239 }
240
hash_from_rel_patch(&self, patch: &PatchDictSchema) -> Result<String>241 fn hash_from_rel_patch(&self, patch: &PatchDictSchema) -> Result<String> {
242 hash_from_patch_path(&self.workdir.join(&patch.rel_patch_path))
243 }
244 }
245
246 impl std::fmt::Display for PatchCollection {
fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result247 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
248 for (i, p) in self.patches.iter().enumerate() {
249 let title = p
250 .metadata
251 .as_ref()
252 .and_then(|x| x.get("title"))
253 .and_then(serde_json::Value::as_str)
254 .unwrap_or("[No Title]");
255 let path = self.workdir.join(&p.rel_patch_path);
256 writeln!(f, "* {}", title)?;
257 if i == self.patches.len() - 1 {
258 write!(f, " {}", path.display())?;
259 } else {
260 writeln!(f, " {}", path.display())?;
261 }
262 }
263 Ok(())
264 }
265 }
266
/// Generate a PatchCollection incorporating only the diff between current patches and old patch
/// contents.
///
/// Returns `(cur_collection, new_patches)`:
/// - `cur_collection`: the current PATCHES.json entries, filtered to the
///   given platform and to patches that exist on disk.
/// - `new_patches`: the subset of `cur_collection` absent from
///   `old_patch_contents`, with "android" and "chromiumos" added to each
///   patch's platform set.
pub fn new_patches(
    patches_path: &Path,
    old_patch_contents: &str,
    platform: &str,
) -> Result<(PatchCollection, PatchCollection)> {
    let cur_collection = PatchCollection::parse_from_file(patches_path)
        .with_context(|| format!("parsing {} PATCHES.json", platform))?;
    // Restrict to this platform, then to patches actually present on disk.
    let cur_collection = filter_patches_by_platform(&cur_collection, platform);
    let cur_collection = cur_collection.filter_patches(|p| cur_collection.patch_exists(p));
    let new_patches: PatchCollection = {
        // The old contents use the same workdir as the current file, so
        // existence checks resolve against the same directory.
        let old_collection = PatchCollection::parse_from_str(
            patches_path.parent().unwrap().to_path_buf(),
            old_patch_contents,
        )?;
        let old_collection = old_collection.filter_patches(|p| old_collection.patch_exists(p));
        cur_collection.subtract(&old_collection)?
    };
    // Mark every new patch as applying to both platforms, merged with any
    // platforms it already listed.
    let new_patches = new_patches.map_patches(|p| {
        let mut platforms = BTreeSet::new();
        platforms.extend(["android".to_string(), "chromiumos".to_string()]);
        PatchDictSchema {
            platforms: platforms.union(&p.platforms).cloned().collect(),
            ..p.to_owned()
        }
    });
    Ok((cur_collection, new_patches))
}
296
297 /// Create a new collection with only the patches that apply to the
298 /// given platform.
299 ///
300 /// If there's no platform listed, the patch should still apply if the patch file exists.
filter_patches_by_platform(collection: &PatchCollection, platform: &str) -> PatchCollection301 pub fn filter_patches_by_platform(collection: &PatchCollection, platform: &str) -> PatchCollection {
302 collection.filter_patches(|p| {
303 p.platforms.contains(platform) || (p.platforms.is_empty() && collection.patch_exists(p))
304 })
305 }
306
307 /// Get the hash from the patch file contents.
308 ///
309 /// Not every patch file actually contains its own hash,
310 /// we must compute the hash ourselves when it's not found.
hash_from_patch(patch_contents: impl Read) -> Result<String>311 fn hash_from_patch(patch_contents: impl Read) -> Result<String> {
312 let mut reader = BufReader::new(patch_contents);
313 let mut buf = String::new();
314 reader.read_line(&mut buf)?;
315 let mut first_line_iter = buf.trim().split(' ').fuse();
316 let (fst_word, snd_word) = (first_line_iter.next(), first_line_iter.next());
317 if let (Some("commit" | "From"), Some(hash_str)) = (fst_word, snd_word) {
318 // If the first line starts with either "commit" or "From", the following
319 // text is almost certainly a commit hash.
320 Ok(hash_str.to_string())
321 } else {
322 // This is an annoying case where the patch isn't actually a commit.
323 // So we'll hash the entire file, and hope that's sufficient.
324 let mut hasher = Sha256::new();
325 hasher.update(&buf); // Have to hash the first line.
326 reader.read_to_string(&mut buf)?;
327 hasher.update(buf); // Hash the rest of the file.
328 let sha = hasher.finalize();
329 Ok(format!("{:x}", &sha))
330 }
331 }
332
hash_from_patch_path(patch: &Path) -> Result<String>333 fn hash_from_patch_path(patch: &Path) -> Result<String> {
334 let f = File::open(patch).with_context(|| format!("opening patch file {}", patch.display()))?;
335 hash_from_patch(f)
336 }
337
338 /// Copy a file from one path to another, and create any parent
339 /// directories along the way.
copy_create_parents(from: &Path, to: &Path) -> Result<()>340 fn copy_create_parents(from: &Path, to: &Path) -> Result<()> {
341 let to_parent = to
342 .parent()
343 .with_context(|| format!("getting parent of {}", to.display()))?;
344 if !to_parent.exists() {
345 std::fs::create_dir_all(to_parent)?;
346 }
347
348 copy(&from, &to)
349 .with_context(|| format!("copying file from {} to {}", &from.display(), &to.display()))?;
350 Ok(())
351 }
352
#[cfg(test)]
mod test {

    use super::*;

    /// Test we can extract the hash from patch files.
    ///
    /// Both fixtures take the fast path (first word "commit" or "From"), so
    /// no SHA-256 is computed here.
    #[test]
    fn test_hash_from_patch() {
        // Example git patch from Gerrit
        let desired_hash = "004be4037e1e9c6092323c5c9268acb3ecf9176c";
        let test_file_contents = "commit 004be4037e1e9c6092323c5c9268acb3ecf9176c\n\
            Author: An Author <some_email>\n\
            Date: Thu Aug 6 12:34:16 2020 -0700";
        assert_eq!(
            &hash_from_patch(test_file_contents.as_bytes()).unwrap(),
            desired_hash
        );

        // Example git patch from upstream
        let desired_hash = "6f85225ef3791357f9b1aa097b575b0a2b0dff48";
        let test_file_contents = "From 6f85225ef3791357f9b1aa097b575b0a2b0dff48\n\
            Mon Sep 17 00:00:00 2001\n\
            From: Another Author <another_email>\n\
            Date: Wed, 18 Aug 2021 15:03:03 -0700";
        assert_eq!(
            &hash_from_patch(test_file_contents.as_bytes()).unwrap(),
            desired_hash
        );
    }

    /// Union of two collections: patches with matching "hashes" (here, the
    /// rel_patch_path injected via union_helper) merge their platform sets;
    /// non-matching patches are carried over unchanged.
    #[test]
    fn test_union() {
        let patch1 = PatchDictSchema {
            start_version: Some(0),
            end_version: Some(1),
            rel_patch_path: "a".into(),
            metadata: None,
            platforms: BTreeSet::from(["x".into()]),
            version_range: Some(VersionRange {
                from: Some(0),
                until: Some(1),
            }),
        };
        // Same shape as patch1 but a different path, so it will NOT merge.
        let patch2 = PatchDictSchema {
            rel_patch_path: "b".into(),
            platforms: BTreeSet::from(["x".into(), "y".into()]),
            ..patch1.clone()
        };
        // Same path as patch1 ("a"), so it WILL merge with it.
        let patch3 = PatchDictSchema {
            platforms: BTreeSet::from(["z".into(), "x".into()]),
            ..patch1.clone()
        };
        let collection1 = PatchCollection {
            workdir: PathBuf::new(),
            patches: vec![patch1, patch2],
        };
        let collection2 = PatchCollection {
            workdir: PathBuf::new(),
            patches: vec![patch3],
        };
        let union = collection1
            .union_helper(
                &collection2,
                |p| Ok(p.rel_patch_path.to_string()),
                |p| Ok(p.rel_patch_path.to_string()),
            )
            .expect("could not create union");
        assert_eq!(union.patches.len(), 2);
        // patch1 merged with patch3: {"x"} ∪ {"z","x"} = {"x","z"}.
        assert_eq!(
            union.patches[0].platforms.iter().collect::<Vec<&String>>(),
            vec!["x", "z"]
        );
        // patch2 passed through untouched.
        assert_eq!(
            union.patches[1].platforms.iter().collect::<Vec<&String>>(),
            vec!["x", "y"]
        );
    }

    /// Merging two identical patches with empty platform sets keeps the
    /// platform set empty (union of two empties).
    #[test]
    fn test_union_empties() {
        let patch1 = PatchDictSchema {
            start_version: Some(0),
            end_version: Some(1),
            rel_patch_path: "a".into(),
            metadata: None,
            platforms: Default::default(),
            version_range: Some(VersionRange {
                from: Some(0),
                until: Some(1),
            }),
        };
        let collection1 = PatchCollection {
            workdir: PathBuf::new(),
            patches: vec![patch1.clone()],
        };
        let collection2 = PatchCollection {
            workdir: PathBuf::new(),
            patches: vec![patch1],
        };
        let union = collection1
            .union_helper(
                &collection2,
                |p| Ok(p.rel_patch_path.to_string()),
                |p| Ok(p.rel_patch_path.to_string()),
            )
            .expect("could not create union");
        assert_eq!(union.patches.len(), 1);
        assert_eq!(union.patches[0].platforms.len(), 0);
    }
}
463