• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33
34from hashlib import sha1 as sha1
35
36
class Options(object):
  """Mutable global configuration for the releasetools scripts.

  A single shared instance (OPTIONS, defined below) is populated by
  ParseOptions() and consulted throughout this module.
  """

  def __init__(self):
    # Host-tools output directory per sys.platform; unknown platforms get
    # search_path = None, meaning tools are taken from $PATH.
    platform_search_path = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }

    self.search_path = platform_search_path.get(sys.platform, None)
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m" # JVM Args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []
    self.verbose = False
    # Paths (files and directories) registered for deletion by Cleanup().
    self.tempfiles = []
    # Module path for device-specific releasetools extensions (-s flag).
    self.device_specific = None
    # Free-form key/value pairs from -x, read by device-specific code.
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
68
69
# The single shared Options instance used throughout this module and by
# the scripts that import it.
OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
75
class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h' """

  # 1xxx: failures affecting the system partition.
  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  # 2xxx: failures affecting the vendor partition (mirrors the 1xxx codes).
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  # 3xxx: device/build mismatches and patch-application errors.
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008
103
class ExternalError(RuntimeError):
  """Raised when an external tool (mkbootimg, signapk, unzip, aapt, ...)
  fails or returns unusable output."""
  pass
106
107
def Run(args, **kwargs):
  """Spawn a subprocess for the given argv list and return the Popen.

  Echoes the command line first when verbose mode (-v) is on.  All
  keyword arguments are forwarded unchanged to subprocess.Popen.
  """
  if OPTIONS.verbose:
    print("  running:  " + " ".join(args))
  return subprocess.Popen(args, **kwargs)
114
115
def CloseInheritedPipes():
  """Close leaked pipe file descriptors inherited from gmake on Mac OS.

  Gmake on Mac OS leaks pipe fds into child processes; sweep fds 3-1024
  and close any that stat as pipes.  A no-op on non-Darwin platforms.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      st = os.fstat(fd)
      # 0x1000 is the S_IFIFO bit of st_mode: the fd refers to a pipe.
      if st is not None and st[0] & 0x1000:
        os.close(fd)
    except OSError:
      # fd not open, or already closed -- nothing to do.
      pass
130
131
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  input_file may be either an open zipfile.ZipFile or the path of an
  unzipped target-files directory.  If input_dir is not None we are
  repacking from an unzipped tree, and several path-valued properties
  are redirected to files inside that tree (see below).
  """

  def read_helper(fn):
    # Return the contents of entry 'fn' (a "META/..."-style path) from
    # either a zip or a directory tree; raise KeyError when the entry is
    # missing, mirroring ZipFile.read's behavior for absent members.
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      # This one is mandatory: without it we cannot build an OTA package.
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

    # Redirect {system,vendor}_base_fs_file.
    if "system_base_fs_file" in d:
      basename = os.path.basename(d["system_base_fs_file"])
      system_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(system_base_fs_file):
        d["system_base_fs_file"] = system_base_fs_file
      else:
        # Missing base fs file: drop the property rather than point at a
        # nonexistent path.
        print "Warning: failed to find system base fs file: %s" % (
            system_base_fs_file,)
        del d["system_base_fs_file"]

    if "vendor_base_fs_file" in d:
      basename = os.path.basename(d["vendor_base_fs_file"])
      vendor_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(vendor_base_fs_file):
        d["vendor_base_fs_file"] = vendor_base_fs_file
      else:
        print "Warning: failed to find vendor base fs file: %s" % (
            vendor_base_fs_file,)
        del d["vendor_base_fs_file"]

  # Legacy image-size file: "<name> <value>" pairs, one per line.
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    # Convert a known-numeric property in place (base auto-detected).
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  # Pick the fstab location depending on whether a recovery partition
  # exists; with recovery-as-boot the fstab lives in the boot ramdisk.
  system_root_image = d.get("system_root_image", None) == "true"
  if d.get("no_recovery", None) != "true":
    recovery_fstab_path = "RECOVERY/RAMDISK/etc/recovery.fstab"
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
        recovery_fstab_path, system_root_image)
  elif d.get("recovery_as_boot", None) == "true":
    recovery_fstab_path = "BOOT/RAMDISK/etc/recovery.fstab"
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
        recovery_fstab_path, system_root_image)
  else:
    d["fstab"] = None

  d["build.prop"] = LoadBuildProp(read_helper)
  return d
276
def LoadBuildProp(read_helper):
  """Parse SYSTEM/build.prop into a {name: value} dict.

  read_helper is a callable that returns the contents of a target-files
  entry, raising KeyError when the entry is missing.  Returns an empty
  dict when build.prop cannot be found.
  """
  try:
    data = read_helper("SYSTEM/build.prop")
  except KeyError:
    # Bug fix: the old message did '% zip', interpolating the *builtin*
    # zip function (printing "<built-in function zip>"); no filename
    # variable exists in this scope.
    print("Warning: could not find SYSTEM/build.prop")
    data = ""
  return LoadDictionaryFromLines(data.split("\n"))
284
def LoadDictionaryFromLines(lines):
  """Parse "name=value" lines into a dict.

  Blank lines, comment lines (leading '#'), and lines without an '='
  are skipped.  Only the first '=' separates the name from the value.
  """
  result = {}
  for raw in lines:
    entry = raw.strip()
    if not entry or entry.startswith("#") or "=" not in entry:
      continue
    key, _, value = entry.partition("=")
    result[key] = value
  return result
295
def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
                      system_root_image=False):
  """Parse a recovery.fstab into a {mount_point: Partition} dict.

  read_helper is a callable returning the contents of a target-files
  entry (raising KeyError when missing).  fstab_version selects the v1
  or v2 line format; any other value raises ValueError.  When
  system_root_image is true, "/system" is aliased to the "/" entry.
  """
  class Partition(object):
    # Plain record for one fstab entry.  'context' defaults to None
    # because version-1 fstabs have no SELinux context column and the v1
    # parser below omits the argument; previously it was required, so
    # constructing a Partition from any v1 fstab raised TypeError.
    def __init__(self, mount_point, fs_type, device, length, device2,
                 context=None):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.device2 = device2
      self.context = context

  try:
    data = read_helper(recovery_fstab_path)
  except KeyError:
    print("Warning: could not find {}".format(recovery_fstab_path))
    data = ""

  if fstab_version == 1:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # v1 format: <mount_point> <fs_type> <device> [<device2>|<options>]
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      options = None
      if len(pieces) >= 4:
        if pieces[3].startswith("/"):
          device2 = pieces[3]
          # NOTE(review): unreachable given the 3..4 length check above;
          # kept for parity with the original parser.
          if len(pieces) >= 5:
            options = pieces[4]
        else:
          device2 = None
          options = pieces[3]
      else:
        device2 = None

      mount_point = pieces[0]
      length = 0
      if options:
        options = options.split(",")
        for i in options:
          if i.startswith("length="):
            length = int(i[7:])
          else:
            print("%s: unknown option \"%s\"" % (mount_point, i))

      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,
                                 device2=device2)

  elif fstab_version == 2:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
      if len(pieces) != 5:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # Ignore entries that are managed by vold
      options = pieces[4]
      if "voldmanaged=" in options:
        continue

      # It's a good line, parse it
      length = 0
      options = options.split(",")
      for i in options:
        if i.startswith("length="):
          length = int(i[7:])
        else:
          # Ignore all unknown options in the unified fstab
          continue

      mount_flags = pieces[3]
      # Honor the SELinux context if present.
      context = None
      for i in mount_flags.split(","):
        if i.startswith("context="):
          context = i

      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)

  else:
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  # / is used for the system mount point when the root directory is included
  # in system. Other areas assume system is always at "/system" so point
  # /system at /.  ("in" replaces the Python-2-only dict.has_key here.)
  if system_root_image:
    assert "/system" not in d and "/" in d
    d["/system"] = d["/"]
  return d
396
397
def DumpInfoDict(d):
  """Pretty-print an info dict: one sorted "key = (type) value" per line."""
  for key in sorted(d):
    value = d[key]
    print("%-25s = (%s) %s" % (key, type(value).__name__, value))
401
402
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input
  (in 'sourcedir'), and turn them into a boot image.  Return the image data,
  or None if sourcedir does not appear to contain files for building the
  requested image.

  Args:
    sourcedir: directory holding a 'kernel' file, optional 'second',
        'cmdline', 'base' and 'pagesize' files, and a RAMDISK/ subtree.
    fs_config_file: optional fs_config file passed to mkbootfs with -f.
    info_dict: build-properties dict; defaults to OPTIONS.info_dict.
    has_ramdisk: whether to build and embed a ramdisk.

  Raises:
    AssertionError: if any external tool exits with nonzero status.
  """

  def make_ramdisk():
    # Pipe mkbootfs output through minigzip into a temp file; return the
    # open NamedTemporaryFile so the caller controls its lifetime.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  # A kernel is mandatory; without one there is nothing to build.
  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Optional single-value inputs, each stored as a small file in sourcedir.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot, mkbootimg writes to an intermediate unsigned image which
  # the vboot signer then turns into the final img.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    # Sign in place with boot_signer using the verity key.
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # Bug fix: seek's signature is seek(offset, whence); the old call
  # seek(os.SEEK_SET, 0) had the arguments transposed and only worked
  # because os.SEEK_SET happens to equal 0.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
530
531
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'.

  Returns None when the image could not be found or built.
  """

  # First preference: a prebuilt dropped in BOOTABLE_IMAGES (the
  # "foo.zip+bar.zip" form of UnzipTemp populates this directory).
  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  # Second preference: a prebuilt under IMAGES/.
  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  print "building image from target_files %s..." % (tree_subdir,)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
  # for recovery.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  # e.g. tree_subdir "BOOT" -> META/boot_filesystem_config.txt
  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)
  if data:
    return File(name, data)
  return None
569
570
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.  The temp dir is registered in
  OPTIONS.tempfiles for later removal by Cleanup().
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(zip_path, dest_dir):
    # Shell out to "unzip"; an optional member pattern restricts what is
    # extracted.
    cmd = ["unzip", "-o", "-q", zip_path, "-d", dest_dir]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (zip_path,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = m.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
603
604
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password."""

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First probe: try reading the key as an unencrypted PKCS#8 blob;
    # success means no password is needed.
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second probe: retry as an encrypted key with the empty password.
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  # Collect the remaining passwords interactively; keys needing no
  # password map to None.
  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
652
653
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK. This can be both a decimal
  number (API Level) or a codename.

  Runs "aapt dump badging" and scans its output for the sdkVersion line.
  Raises ExternalError if aapt fails or reports no sdkVersion.
  """

  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, _ = p.communicate()
  # Bug fix: stderr is not piped, so communicate() always returns None
  # for it and the old "if err:" check could never fire.  Detect failure
  # from the exit status instead.
  if p.returncode != 0:
    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
        % (p.returncode,))

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt")
671
672
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level).

  If minSdkVersion is a codename rather than a decimal number, it is
  translated through codename_to_api_level_map; an unknown codename
  raises ExternalError.
  """

  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    pass
  # Not a decimal number -- it must be a codename known to the map.
  if version not in codename_to_api_level_map:
    raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                        % (version, codename_to_api_level_map))
  return codename_to_api_level_map[version]
689
690
def SignFile(input_name, output_name, key, password, min_api_level=None,
    codename_to_api_level_map=None,
    whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.  Defaults to an empty dict.
  """
  # Bug fix: the default used to be the mutable "dict()", a single object
  # shared across all calls; use the None sentinel instead.
  if codename_to_api_level_map is None:
    codename_to_api_level_map = {}

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = [OPTIONS.java_path, OPTIONS.java_args,
         "-Djava.library.path=" + java_library_path,
         "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  # Determine the --min-sdk-version to pass: explicit argument wins;
  # otherwise (for APKs, i.e. not whole_file) read it from the APK itself.
  min_sdk_version = min_api_level
  if min_sdk_version is None:
    if not whole_file:
      min_sdk_version = GetMinSdkVersionInt(
          input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  # The password (if any) is fed to signapk on stdin.
  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
739
740
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size."""

  # Map an image file name (e.g. "system.img") to its mount point.
  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      device = device[device.rfind("/")+1:]
    # Size limits are stored as "<device>_size" entries in the info dict.
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  # >= 99% is a hard error; >= 95% warns loudly; otherwise only report
  # in verbose mode.
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg
779
780
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict.

  Raises ValueError on any line that is neither a special cert entry
  nor a matching public/private key pair.
  """
  line_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                       r'private_key="(.*)"$')
  certmap = {}
  for raw in tf_zip.read("META/apkcerts.txt").split("\n"):
    entry = raw.strip()
    if not entry:
      continue
    m = line_re.match(entry)
    if not m:
      continue
    name, cert, privkey = m.groups()
    pub_suffix_len = len(OPTIONS.public_key_suffix)
    priv_suffix_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      # PRESIGNED / EXTERNAL entries carry no key pair.
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-pub_suffix_len] == privkey[:-priv_suffix_len]):
      # Store the shared basename of the matching key pair.
      certmap[name] = cert[:-pub_suffix_len]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + entry)
  return certmap
804
805
# Help text for the options handled by ParseOptions(); shared by every
# releasetools script and appended to each script's own usage message by
# Usage() below.
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
825
def Usage(docstring):
  """Print the calling script's docstring followed by the common options."""
  print(docstring.rstrip("\n"))
  print(COMMON_DOCSTRING)
829
830
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler."""

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  # Each recognized option below sets a field on the global OPTIONS
  # object; unrecognized options are offered to extra_option_handler.
  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  # Make binaries under the search path visible to subsequent Run() calls.
  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
900
901
def MakeTempFile(prefix=None, suffix=None):
  """Create a named temporary file, register it for later deletion by
  Cleanup(), and return its path."""
  handle, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # Close the low-level descriptor right away; callers reopen by name.
  os.close(handle)
  OPTIONS.tempfiles.append(path)
  return path
909
910
def Cleanup():
  """Delete every path registered in OPTIONS.tempfiles, whether it is a
  single file or a whole directory tree."""
  for path in OPTIONS.tempfiles:
    if os.path.isdir(path):
      shutil.rmtree(path)
    else:
      os.remove(path)
917
918
class PasswordManager(object):
  """Collects key passwords, either by letting the user edit a password
  file ($ANDROID_PW_FILE) in $EDITOR, or by prompting interactively via
  getpass when neither is configured."""

  def __init__(self):
    # Both default to None when unset; GetPasswords() then degrades to
    # interactive prompting.
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed empty entries so the edited file lists every needed key.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        # Keep prompting until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Write 'current' to the password file, let the user edit it in
    $EDITOR, then re-read the file and return its contents.  Falls back
    to PromptResult() when no editor or password file is configured."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # Passwords are sensitive; restrict the file to the owner.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sorting on (not v, k, v) puts keys with missing passwords first.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Parse the password file into a {key_name: password} dict.
    Returns an empty dict when the file is unset or doesn't exist."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        # Lines look like: [[[  password  ]]] keyname
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      # A missing file just means no saved passwords; other I/O errors
      # are reported but not fatal.
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result
1019
1020
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add the on-disk file 'filename' to 'zip_file' reproducibly.

  The file's permissions are forced to 'perms' and its timestamps to a
  fixed date (2009-01-01) for the duration of the write, so the archive
  contents are deterministic; the original stat is restored afterwards.

  zipfile.ZIP64_LIMIT is raised while writing to work around Python
  2.7's zipfile wrongly requiring zip64 for files over 2GiB
  (http://b/18015246).  Note `zipfile.writestr()` cannot be used for
  strings that large, so `zipfile.write()` is used directly; this mess
  can be avoided if we port to python3.
  """
  import datetime

  old_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  compress_type = (zip_file.compression if compress_type is None
                   else compress_type)
  arcname = filename if arcname is None else arcname

  original_stat = os.stat(filename)

  try:
    # `zipfile.write()` offers no ZipInfo hook, so mutate the on-disk
    # file's mode/times and put them back when done.
    os.chmod(filename, perms)

    # Fixed timestamp: seconds such that local time is 2009-01-01 00:00.
    epoch_local = datetime.datetime.fromtimestamp(0)
    fixed_time = (datetime.datetime(2009, 1, 1) - epoch_local).total_seconds()
    os.utime(filename, (fixed_time, fixed_time))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = old_limit
1059
1060
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: An open zipfile.ZipFile to write into.
    zinfo_or_arcname: Either a zipfile.ZipInfo (used as-is) or an archive
        name string (a ZipInfo is constructed for it).
    data: The data to store.
    perms: Unix mode bits for the entry; defaults to 0o100644 when an
        arcname string was given, otherwise the ZipInfo's value is kept.
    compress_type: Overrides the entry's compression method when given.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore ZIP64_LIMIT even if writestr() raises; previously an
  # exception here would leak the altered module-level limit (ZipWrite()
  # and ZipClose() already restore unconditionally).
  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1101
1102
def ZipClose(zip_file):
  """Close 'zip_file' with the zip64 workaround (http://b/18015246):
  zipfile also refers to ZIP64_LIMIT during close() when it writes out
  the central directory.

  The limit is restored even if close() raises, so a failed close can't
  leak the altered module-level value.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    zip_file.close()
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1113
1114
class DeviceSpecificParams(object):
  """Dispatcher for optional device-specific OTA hooks, loaded from the
  file or directory named by OPTIONS.device_specific."""

  # The loaded device-specific module; a class attribute, so it is
  # shared once any instance has loaded it.
  module = None
  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          # A directory: look for a "releasetools" module inside it.
          info = imp.find_module("releasetools", [path])
        else:
          # A single file: strip a ".py" extension and import it from
          # its containing directory.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        # Best-effort: a missing module just means no hooks run.
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    """Device-specific hook dispatched via _DoCall()."""
    return self._DoCall("VerifyOTA_Assertions")
1199
class File(object):
  """An in-memory file: a name plus its full contents, with the size
  and SHA-1 digest computed up front."""

  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Build a File called 'name' from the contents of 'diskname'."""
    with open(diskname, "rb") as source:
      contents = source.read()
    return File(name, contents)

  def WriteToTemp(self):
    """Dump the contents into a NamedTemporaryFile and return it (still
    open; the caller is responsible for closing it)."""
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(self.data)
    tmp.flush()
    return tmp

  def AddToZip(self, z, compression=None):
    """Store this file in the open zip 'z' under its own name."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1222
# Maps a target file's extension to the diff program used to produce
# its patch ("-z" tells imgdiff the input is zip-based).  Extensions not
# listed here fall back to bsdiff (see Difference.ComputePatch).
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1230
1231class Difference(object):
1232  def __init__(self, tf, sf, diff_program=None):
1233    self.tf = tf
1234    self.sf = sf
1235    self.patch = None
1236    self.diff_program = diff_program
1237
1238  def ComputePatch(self):
1239    """Compute the patch (as a string of data) needed to turn sf into
1240    tf.  Returns the same tuple as GetPatch()."""
1241
1242    tf = self.tf
1243    sf = self.sf
1244
1245    if self.diff_program:
1246      diff_program = self.diff_program
1247    else:
1248      ext = os.path.splitext(tf.name)[1]
1249      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
1250
1251    ttemp = tf.WriteToTemp()
1252    stemp = sf.WriteToTemp()
1253
1254    ext = os.path.splitext(tf.name)[1]
1255
1256    try:
1257      ptemp = tempfile.NamedTemporaryFile()
1258      if isinstance(diff_program, list):
1259        cmd = copy.copy(diff_program)
1260      else:
1261        cmd = [diff_program]
1262      cmd.append(stemp.name)
1263      cmd.append(ttemp.name)
1264      cmd.append(ptemp.name)
1265      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1266      err = []
1267      def run():
1268        _, e = p.communicate()
1269        if e:
1270          err.append(e)
1271      th = threading.Thread(target=run)
1272      th.start()
1273      th.join(timeout=300)   # 5 mins
1274      if th.is_alive():
1275        print "WARNING: diff command timed out"
1276        p.terminate()
1277        th.join(5)
1278        if th.is_alive():
1279          p.kill()
1280          th.join()
1281
1282      if err or p.returncode != 0:
1283        print "WARNING: failure running %s:\n%s\n" % (
1284            diff_program, "".join(err))
1285        self.patch = None
1286        return None, None, None
1287      diff = ptemp.read()
1288    finally:
1289      ptemp.close()
1290      stemp.close()
1291      ttemp.close()
1292
1293    self.patch = diff
1294    return self.tf, self.sf, self.patch
1295
1296
1297  def GetPatch(self):
1298    """Return a tuple (target_file, source_file, patch_data).
1299    patch_data may be None if ComputePatch hasn't been called, or if
1300    computing the patch failed."""
1301    return self.tf, self.sf, self.patch
1302
1303
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    # Each worker holds the lock while pulling the next item from
    # diff_iter (and while printing results), and releases it around
    # the expensive ComputePatch() call so workers diff in parallel.
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      # Report and re-raise so a failure isn't silently lost in a thread.
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1348
1349
class BlockDifference(object):
  """Computes a block-based image diff for one partition (target image
  'tgt' against optional source image 'src') and emits the edify script
  fragments that verify and apply it."""

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    # Default to the highest transfer-list version declared in the info
    # dict's "blockimgdiff_versions", or 1 when nothing is declared.
    if version is None:
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version,
                                    disable_imgdiff=self.disable_imgdiff)
    # The transfer list and data files are written under a temp dir
    # that Cleanup() will remove later.
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # Incremental diffs resolve the device path from the source build's
    # info dict; full diffs from the target's.
    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  @property
  def required_cache(self):
    # Maximum bytes the updater may need to stash while applying.
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
    """Emit the commands that apply this diff, plus post-install
    verification when OPTIONS.verify is set."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    if OPTIONS.verify:
      self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
                       self.device))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    """Emit the pre-install verification commands for this partition.
    For incremental diffs, accepts either the expected source state or
    an already-updated (target) state via block_image_verify."""
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only and self.version >= 3:
        # Only hash the source blocks this diff actually reads.
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      # NOTE(review): the version >= 4 and version == 3 branches below
      # emit identical commands; they could be merged into one branch.
      if self.version >= 4:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
      elif self.version == 3:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def _WritePostInstallVerifyScript(self, script):
    """Emit commands that verify the partition after patching."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    # Closes the outer range_sha1 "if" opened above.
    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    """Package the transfer list and data files into output_zip and emit
    the block_image_update call that applies them."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # The patch data is stored uncompressed (ZIP_STORED).
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the hex SHA-1 over the data of 'ranges' read from 'source'."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # Blocks are 4096 bytes each.
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
1583
1584
# Re-export blockimgdiff's DataImage so callers can use common.DataImage.
DataImage = blockimgdiff.DataImage

# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}
1596
def GetTypeAndDevice(mount_point, info):
  """Look up 'mount_point' in info["fstab"] and return a tuple of
  (partition_type, device), where partition_type is the mount/format
  type ("MTD" or "EMMC", per PARTITION_TYPES) for the entry's fs_type.

  Raises KeyError if there is no fstab, or if it has no entry for
  mount_point."""
  fstab = info["fstab"]
  if fstab:
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  else:
    # Include the mount point in the error so callers can tell which
    # lookup failed (previously raised a bare, message-less KeyError).
    raise KeyError(mount_point)
1604
1605
def ParseCertificate(data):
  """Parse a PEM-format certificate and return its raw (DER) contents.

  'data' is the text of a .pem file; the base64 payload between the
  first BEGIN CERTIFICATE and END CERTIFICATE markers is decoded and
  returned.  Only the first certificate in 'data' is parsed."""
  import base64

  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # base64.b64decode behaves identically to the Python-2-only
  # str.decode('base64') codec used previously, and also works on
  # Python 3.
  cert = base64.b64decode("".join(cert))
  return cert
1619
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # Ship the complete recovery image rather than a boot->recovery patch.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      # The bonus resource file is used both when computing the patch
      # here and (via bonus_args) when applying it on the device.
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # No fstab entries for boot/recovery: skip emitting the install script.
    return

  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        # A flash_recovery service line names the script's install path.
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print "putting script in", sh_location

  output_sink(sh_location, sh)
1722