diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py index b3d2edeb38..7984ad684b 100755 --- a/tools/releasetools/add_img_to_target_files.py +++ b/tools/releasetools/add_img_to_target_files.py @@ -55,14 +55,14 @@ def AddSystem(output_zip, prefix="IMAGES/", recovery_img=None, boot_img=None): return def output_sink(fn, data): - ofile = open(os.path.join(OPTIONS.input_tmp,"SYSTEM",fn), "w") - ofile.write(data) - ofile.close() + ofile = open(os.path.join(OPTIONS.input_tmp, "SYSTEM", fn), "w") + ofile.write(data) + ofile.close() if OPTIONS.rebuild_recovery: - print("Building new recovery patch") - common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img, boot_img, - info_dict=OPTIONS.info_dict) + print "Building new recovery patch" + common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img, + boot_img, info_dict=OPTIONS.info_dict) block_list = common.MakeTempFile(prefix="system-blocklist-", suffix=".map") imgname = BuildSystem(OPTIONS.input_tmp, OPTIONS.info_dict, @@ -88,7 +88,7 @@ def AddVendor(output_zip, prefix="IMAGES/"): block_list = common.MakeTempFile(prefix="vendor-blocklist-", suffix=".map") imgname = BuildVendor(OPTIONS.input_tmp, OPTIONS.info_dict, - block_list=block_list) + block_list=block_list) common.ZipWrite(output_zip, imgname, prefix + "vendor.img") common.ZipWrite(output_zip, block_list, prefix + "vendor.map") @@ -110,18 +110,18 @@ def CreateImage(input_dir, info_dict, what, block_list=None): try: os.symlink(os.path.join(input_dir, what.upper()), os.path.join(input_dir, what)) - except OSError, e: - # bogus error on my mac version? - # File "./build/tools/releasetools/img_from_target_files", line 86, in AddSystem - # os.path.join(OPTIONS.input_tmp, "system")) - # OSError: [Errno 17] File exists - if (e.errno == errno.EEXIST): + except OSError as e: + # bogus error on my mac version? 
+ # File "./build/tools/releasetools/img_from_target_files" + # os.path.join(OPTIONS.input_tmp, "system")) + # OSError: [Errno 17] File exists + if e.errno == errno.EEXIST: pass image_props = build_image.ImagePropFromGlobalDict(info_dict, what) fstab = info_dict["fstab"] if fstab: - image_props["fs_type" ] = fstab["/" + what].fs_type + image_props["fs_type"] = fstab["/" + what].fs_type if what == "system": fs_config_prefix = "" @@ -130,10 +130,12 @@ def CreateImage(input_dir, info_dict, what, block_list=None): fs_config = os.path.join( input_dir, "META/" + fs_config_prefix + "filesystem_config.txt") - if not os.path.exists(fs_config): fs_config = None + if not os.path.exists(fs_config): + fs_config = None fc_config = os.path.join(input_dir, "BOOT/RAMDISK/file_contexts") - if not os.path.exists(fc_config): fc_config = None + if not os.path.exists(fc_config): + fc_config = None # Override values loaded from info_dict. if fs_config: @@ -182,7 +184,7 @@ def AddUserdata(output_zip, prefix="IMAGES/"): fstab = OPTIONS.info_dict["fstab"] if fstab: - image_props["fs_type" ] = fstab["/data"].fs_type + image_props["fs_type"] = fstab["/data"].fs_type succ = build_image.BuildImage(user_dir, image_props, img.name) assert succ, "build userdata.img image failed" @@ -219,7 +221,7 @@ def AddCache(output_zip, prefix="IMAGES/"): fstab = OPTIONS.info_dict["fstab"] if fstab: - image_props["fs_type" ] = fstab["/cache"].fs_type + image_props["fs_type"] = fstab["/cache"].fs_type succ = build_image.BuildImage(user_dir, image_props, img.name) assert succ, "build cache.img image failed" @@ -298,7 +300,7 @@ def AddImagesToTargetFiles(filename): output_zip.close() def main(argv): - def option_handler(o, a): + def option_handler(o, _): if o in ("-a", "--add_missing"): OPTIONS.add_missing = True elif o in ("-r", "--rebuild_recovery",): @@ -307,12 +309,10 @@ def main(argv): return False return True - args = common.ParseOptions(argv, __doc__, - extra_opts="ar", - extra_long_opts=["add_missing", - 
"rebuild_recovery", - ], - extra_option_handler=option_handler) + args = common.ParseOptions( + argv, __doc__, extra_opts="ar", + extra_long_opts=["add_missing", "rebuild_recovery"], + extra_option_handler=option_handler) if len(args) != 1: @@ -326,7 +326,7 @@ if __name__ == '__main__': try: common.CloseInheritedPipes() main(sys.argv[1:]) - except common.ExternalError, e: + except common.ExternalError as e: print print " ERROR: %s" % (e,) print diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py index f031078a5c..925d38b316 100644 --- a/tools/releasetools/blockimgdiff.py +++ b/tools/releasetools/blockimgdiff.py @@ -20,17 +20,17 @@ import heapq import itertools import multiprocessing import os -import pprint import re import subprocess -import sys import threading import tempfile -from rangelib import * +from rangelib import RangeSet + __all__ = ["EmptyImage", "DataImage", "BlockImageDiff"] + def compute_patch(src, tgt, imgdiff=False): srcfd, srcfile = tempfile.mkstemp(prefix="src-") tgtfd, tgtfile = tempfile.mkstemp(prefix="tgt-") @@ -69,7 +69,16 @@ def compute_patch(src, tgt, imgdiff=False): except OSError: pass -class EmptyImage(object): + +class Image(object): + def ReadRangeSet(self, ranges): + raise NotImplementedError + + def TotalSha1(self): + raise NotImplementedError + + +class EmptyImage(Image): """A zero-length image.""" blocksize = 4096 care_map = RangeSet() @@ -81,7 +90,7 @@ class EmptyImage(object): return sha1().hexdigest() -class DataImage(object): +class DataImage(Image): """An image wrapped around a single string of data.""" def __init__(self, data, trim=False, pad=False): @@ -126,9 +135,7 @@ class DataImage(object): return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges] def TotalSha1(self): - if not hasattr(self, "sha1"): - self.sha1 = sha1(self.data).hexdigest() - return self.sha1 + return sha1(self.data).hexdigest() class Transfer(object): @@ -196,9 +203,13 @@ class BlockImageDiff(object): 
def __init__(self, tgt, src=None, threads=None, version=2): if threads is None: threads = multiprocessing.cpu_count() // 2 - if threads == 0: threads = 1 + if threads == 0: + threads = 1 self.threads = threads self.version = version + self.transfers = [] + self.src_basenames = {} + self.src_numpatterns = {} assert version in (1, 2) @@ -247,6 +258,15 @@ class BlockImageDiff(object): self.ComputePatches(prefix) self.WriteTransfers(prefix) + def HashBlocks(self, source, ranges): # pylint: disable=no-self-use + data = source.ReadRangeSet(ranges) + ctx = sha1() + + for p in data: + ctx.update(p) + + return ctx.hexdigest() + def WriteTransfers(self, prefix): out = [] @@ -283,8 +303,8 @@ class BlockImageDiff(object): free_string = [] if self.version == 1: - src_string = xf.src_ranges.to_string_raw() - elif self.version == 2: + src_str = xf.src_ranges.to_string_raw() + elif self.version >= 2: # <# blocks> # OR @@ -293,7 +313,7 @@ class BlockImageDiff(object): # <# blocks> - size = xf.src_ranges.size() - src_string = [str(size)] + src_str = [str(size)] unstashed_src_ranges = xf.src_ranges mapped_stashes = [] @@ -303,21 +323,29 @@ class BlockImageDiff(object): unstashed_src_ranges = unstashed_src_ranges.subtract(sr) sr = xf.src_ranges.map_within(sr) mapped_stashes.append(sr) - src_string.append("%d:%s" % (sid, sr.to_string_raw())) + if self.version == 2: + src_str.append("%d:%s" % (sid, sr.to_string_raw())) + else: + assert sh in stashes + src_str.append("%s:%s" % (sh, sr.to_string_raw())) + stashes[sh] -= 1 + if stashes[sh] == 0: + free_string.append("free %s\n" % (sh)) + stashes.pop(sh) heapq.heappush(free_stash_ids, sid) if unstashed_src_ranges: - src_string.insert(1, unstashed_src_ranges.to_string_raw()) + src_str.insert(1, unstashed_src_ranges.to_string_raw()) if xf.use_stash: mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges) - src_string.insert(2, mapped_unstashed.to_string_raw()) + src_str.insert(2, mapped_unstashed.to_string_raw()) 
mapped_stashes.append(mapped_unstashed) self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes) else: - src_string.insert(1, "-") + src_str.insert(1, "-") self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes) - src_string = " ".join(src_string) + src_str = " ".join(src_str) # both versions: # zero @@ -330,9 +358,14 @@ class BlockImageDiff(object): # move # # version 2: - # bsdiff patchstart patchlen - # imgdiff patchstart patchlen - # move + # bsdiff patchstart patchlen + # imgdiff patchstart patchlen + # move + # + # version 3: + # bsdiff patchstart patchlen srchash tgthash + # imgdiff patchstart patchlen srchash tgthash + # move hash tgt_size = xf.tgt_ranges.size() @@ -352,7 +385,12 @@ class BlockImageDiff(object): elif self.version == 2: out.append("%s %s %s\n" % ( xf.style, - xf.tgt_ranges.to_string_raw(), src_string)) + xf.tgt_ranges.to_string_raw(), src_str)) + elif self.version >= 3: + out.append("%s %s %s %s\n" % ( + xf.style, + self.HashBlocks(self.tgt, xf.tgt_ranges), + xf.tgt_ranges.to_string_raw(), src_str)) total += tgt_size elif xf.style in ("bsdiff", "imgdiff"): performs_read = True @@ -365,7 +403,14 @@ class BlockImageDiff(object): elif self.version == 2: out.append("%s %d %d %s %s\n" % ( xf.style, xf.patch_start, xf.patch_len, - xf.tgt_ranges.to_string_raw(), src_string)) + xf.tgt_ranges.to_string_raw(), src_str)) + elif self.version >= 3: + out.append("%s %d %d %s %s %s %s\n" % ( + xf.style, + xf.patch_start, xf.patch_len, + self.HashBlocks(self.src, xf.src_ranges), + self.HashBlocks(self.tgt, xf.tgt_ranges), + xf.tgt_ranges.to_string_raw(), src_str)) total += tgt_size elif xf.style == "zero": assert xf.tgt_ranges @@ -374,8 +419,10 @@ class BlockImageDiff(object): out.append("%s %s\n" % (xf.style, to_zero.to_string_raw())) total += to_zero.size() else: - raise ValueError, "unknown transfer style '%s'\n" % (xf.style,) + raise ValueError("unknown transfer style '%s'\n" % xf.style) + if free_string: + out.append("".join(free_string)) 
# sanity check: abort if we're going to need more than 512 MB if # stash space @@ -481,11 +528,13 @@ class BlockImageDiff(object): patches = [None] * patch_num + # TODO: Rewrite with multiprocessing.ThreadPool? lock = threading.Lock() def diff_worker(): while True: with lock: - if not diff_q: return + if not diff_q: + return tgt_size, src, tgt, xf, patchnum = diff_q.pop() patch = compute_patch(src, tgt, imgdiff=(xf.style == "imgdiff")) size = len(patch) @@ -497,7 +546,7 @@ class BlockImageDiff(object): xf.tgt_name + " (from " + xf.src_name + ")"))) threads = [threading.Thread(target=diff_worker) - for i in range(self.threads)] + for _ in range(self.threads)] for th in threads: th.start() while threads: @@ -624,8 +673,6 @@ class BlockImageDiff(object): stash_size = 0 for xf in self.transfers: - lost = 0 - size = xf.src_ranges.size() for u in xf.goes_before.copy(): # xf should go before u if xf.order < u.order: @@ -691,7 +738,8 @@ class BlockImageDiff(object): # Put all sinks at the end of the sequence. while True: sinks = [u for u in G if not u.outgoing] - if not sinks: break + if not sinks: + break for u in sinks: s2.appendleft(u) del G[u] @@ -701,14 +749,16 @@ class BlockImageDiff(object): # Put all the sources at the beginning of the sequence. while True: sources = [u for u in G if not u.incoming] - if not sources: break + if not sources: + break for u in sources: s1.append(u) del G[u] for iu in u.outgoing: del iu.incoming[u] - if not G: break + if not G: + break # Find the "best" vertex to put next. "Best" is the one that # maximizes the net difference in source blocks saved we get by @@ -746,7 +796,8 @@ class BlockImageDiff(object): print("Generating digraph...") for a in self.transfers: for b in self.transfers: - if a is b: continue + if a is b: + continue # If the blocks written by A are read by B, then B needs to go before A. 
i = a.tgt_ranges.intersect(b.src_ranges) @@ -761,7 +812,6 @@ class BlockImageDiff(object): a.goes_after[b] = size def FindTransfers(self): - self.transfers = [] empty = RangeSet() for tgt_fn, tgt_ranges in self.tgt.file_map.items(): if tgt_fn == "__ZERO": @@ -801,9 +851,6 @@ class BlockImageDiff(object): Transfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers) def AbbreviateSourceNames(self): - self.src_basenames = {} - self.src_numpatterns = {} - for k in self.src.file_map.keys(): b = os.path.basename(k) self.src_basenames[b] = k diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py index 97353dfd3e..33540d2311 100755 --- a/tools/releasetools/build_image.py +++ b/tools/releasetools/build_image.py @@ -72,14 +72,15 @@ def AdjustPartitionSizeForVerity(partition_size): """ success, verity_tree_size = GetVerityTreeSize(partition_size) if not success: - return 0; + return 0 success, verity_metadata_size = GetVerityMetadataSize(partition_size) if not success: return 0 return partition_size - verity_tree_size - verity_metadata_size def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict): - cmd = ("build_verity_tree -A %s %s %s" % (FIXED_SALT, sparse_image_path, verity_image_path)) + cmd = "build_verity_tree -A %s %s %s" % ( + FIXED_SALT, sparse_image_path, verity_image_path) print cmd status, output = commands.getstatusoutput(cmd) if status: @@ -92,14 +93,10 @@ def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict): def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt, block_device, signer_path, key): - cmd = ("system/extras/verity/build_verity_metadata.py %s %s %s %s %s %s %s" % - (image_size, - verity_metadata_path, - root_hash, - salt, - block_device, - signer_path, - key)) + cmd_template = ( + "system/extras/verity/build_verity_metadata.py %s %s %s %s %s %s %s") + cmd = cmd_template % (image_size, verity_metadata_path, root_hash, salt, + block_device, signer_path, key) print cmd 
status, output = commands.getstatusoutput(cmd) if status: @@ -125,10 +122,13 @@ def Append2Simg(sparse_image_path, unsparse_image_path, error_message): return False return True -def BuildVerifiedImage(data_image_path, verity_image_path, verity_metadata_path): - if not Append2Simg(data_image_path, verity_metadata_path, "Could not append verity metadata!"): +def BuildVerifiedImage(data_image_path, verity_image_path, + verity_metadata_path): + if not Append2Simg(data_image_path, verity_metadata_path, + "Could not append verity metadata!"): return False - if not Append2Simg(data_image_path, verity_image_path, "Could not append verity tree!"): + if not Append2Simg(data_image_path, verity_image_path, + "Could not append verity tree!"): return False return True @@ -153,7 +153,8 @@ def MakeVerityEnabledImage(out_file, prop_dict): Args: out_file: the location to write the verifiable image at - prop_dict: a dictionary of properties required for image creation and verification + prop_dict: a dictionary of properties required for image creation and + verification Returns: True on success, False otherwise. 
""" @@ -178,13 +179,8 @@ def MakeVerityEnabledImage(out_file, prop_dict): # build the metadata blocks root_hash = prop_dict["verity_root_hash"] salt = prop_dict["verity_salt"] - if not BuildVerityMetadata(image_size, - verity_metadata_path, - root_hash, - salt, - block_dev, - signer_path, - signer_key): + if not BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt, + block_dev, signer_path, signer_key): shutil.rmtree(tempdir_name, ignore_errors=True) return False @@ -237,7 +233,8 @@ def BuildImage(in_dir, prop_dict, out_file): is_verity_partition = "verity_block_device" in prop_dict verity_supported = prop_dict.get("verity") == "true" - # adjust the partition size to make room for the hashes if this is to be verified + # adjust the partition size to make room for the hashes if this is to be + # verified if verity_supported and is_verity_partition: partition_size = int(prop_dict.get("partition_size")) adjusted_size = AdjustPartitionSizeForVerity(partition_size) @@ -355,7 +352,8 @@ def ImagePropFromGlobalDict(glob_dict, mount_point): d["mount_point"] = mount_point if mount_point == "system": copy_prop("fs_type", "fs_type") - # Copy the generic sysetem fs type first, override with specific one if available. + # Copy the generic sysetem fs type first, override with specific one if + # available. 
copy_prop("system_fs_type", "fs_type") copy_prop("system_size", "partition_size") copy_prop("system_journal_size", "journal_size") @@ -430,7 +428,8 @@ def main(argv): image_properties = ImagePropFromGlobalDict(glob_dict, mount_point) if not BuildImage(in_dir, image_properties, out_file): - print >> sys.stderr, "error: failed to build %s from %s" % (out_file, in_dir) + print >> sys.stderr, "error: failed to build %s from %s" % (out_file, + in_dir) exit(1) diff --git a/tools/releasetools/check_target_files_signatures b/tools/releasetools/check_target_files_signatures deleted file mode 100755 index b2f46c1cb8..0000000000 --- a/tools/releasetools/check_target_files_signatures +++ /dev/null @@ -1,441 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (C) 2009 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Check the signatures of all APKs in a target_files .zip file. With --c, compare the signatures of each package to the ones in a separate -target_files (usually a previously distributed build for the same -device) and flag any changes. - -Usage: check_target_file_signatures [flags] target_files - - -c (--compare_with) - Look for compatibility problems between the two sets of target - files (eg., packages whose keys have changed). - - -l (--local_cert_dirs) - Comma-separated list of top-level directories to scan for - .x509.pem files. Defaults to "vendor,build". 
Where cert files - can be found that match APK signatures, the filename will be - printed as the cert name, otherwise a hash of the cert plus its - subject string will be printed instead. - - -t (--text) - Dump the certificate information for both packages in comparison - mode (this output is normally suppressed). - -""" - -import sys - -if sys.hexversion < 0x02070000: - print >> sys.stderr, "Python 2.7 or newer is required." - sys.exit(1) - -import os -import re -import shutil -import subprocess -import tempfile -import zipfile - -try: - from hashlib import sha1 as sha1 -except ImportError: - from sha import sha as sha1 - -import common - -# Work around a bug in python's zipfile module that prevents opening -# of zipfiles if any entry has an extra field of between 1 and 3 bytes -# (which is common with zipaligned APKs). This overrides the -# ZipInfo._decodeExtra() method (which contains the bug) with an empty -# version (since we don't need to decode the extra field anyway). -class MyZipInfo(zipfile.ZipInfo): - def _decodeExtra(self): - pass -zipfile.ZipInfo = MyZipInfo - -OPTIONS = common.OPTIONS - -OPTIONS.text = False -OPTIONS.compare_with = None -OPTIONS.local_cert_dirs = ("vendor", "build") - -PROBLEMS = [] -PROBLEM_PREFIX = [] - -def AddProblem(msg): - PROBLEMS.append(" ".join(PROBLEM_PREFIX) + " " + msg) -def Push(msg): - PROBLEM_PREFIX.append(msg) -def Pop(): - PROBLEM_PREFIX.pop() - - -def Banner(msg): - print "-" * 70 - print " ", msg - print "-" * 70 - - -def GetCertSubject(cert): - p = common.Run(["openssl", "x509", "-inform", "DER", "-text"], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - out, err = p.communicate(cert) - if err and not err.strip(): - return "(error reading cert subject)" - for line in out.split("\n"): - line = line.strip() - if line.startswith("Subject:"): - return line[8:].strip() - return "(unknown cert subject)" - - -class CertDB(object): - def __init__(self): - self.certs = {} - - def Add(self, cert, name=None): - if cert in 
self.certs: - if name: - self.certs[cert] = self.certs[cert] + "," + name - else: - if name is None: - name = "unknown cert %s (%s)" % (common.sha1(cert).hexdigest()[:12], - GetCertSubject(cert)) - self.certs[cert] = name - - def Get(self, cert): - """Return the name for a given cert.""" - return self.certs.get(cert, None) - - def FindLocalCerts(self): - to_load = [] - for top in OPTIONS.local_cert_dirs: - for dirpath, dirnames, filenames in os.walk(top): - certs = [os.path.join(dirpath, i) - for i in filenames if i.endswith(".x509.pem")] - if certs: - to_load.extend(certs) - - for i in to_load: - f = open(i) - cert = common.ParseCertificate(f.read()) - f.close() - name, _ = os.path.splitext(i) - name, _ = os.path.splitext(name) - self.Add(cert, name) - -ALL_CERTS = CertDB() - - -def CertFromPKCS7(data, filename): - """Read the cert out of a PKCS#7-format file (which is what is - stored in a signed .apk).""" - Push(filename + ":") - try: - p = common.Run(["openssl", "pkcs7", - "-inform", "DER", - "-outform", "PEM", - "-print_certs"], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - out, err = p.communicate(data) - if err and not err.strip(): - AddProblem("error reading cert:\n" + err) - return None - - cert = common.ParseCertificate(out) - if not cert: - AddProblem("error parsing cert output") - return None - return cert - finally: - Pop() - - -class APK(object): - def __init__(self, full_filename, filename): - self.filename = filename - Push(filename+":") - try: - self.RecordCerts(full_filename) - self.ReadManifest(full_filename) - finally: - Pop() - - def RecordCerts(self, full_filename): - out = set() - try: - f = open(full_filename) - apk = zipfile.ZipFile(f, "r") - pkcs7 = None - for info in apk.infolist(): - if info.filename.startswith("META-INF/") and \ - (info.filename.endswith(".DSA") or info.filename.endswith(".RSA")): - pkcs7 = apk.read(info.filename) - cert = CertFromPKCS7(pkcs7, info.filename) - out.add(cert) - ALL_CERTS.Add(cert) - if not pkcs7: - 
AddProblem("no signature") - finally: - f.close() - self.certs = frozenset(out) - - def ReadManifest(self, full_filename): - p = common.Run(["aapt", "dump", "xmltree", full_filename, - "AndroidManifest.xml"], - stdout=subprocess.PIPE) - manifest, err = p.communicate() - if err: - AddProblem("failed to read manifest") - return - - self.shared_uid = None - self.package = None - - for line in manifest.split("\n"): - line = line.strip() - m = re.search('A: (\S*?)(?:\(0x[0-9a-f]+\))?="(.*?)" \(Raw', line) - if m: - name = m.group(1) - if name == "android:sharedUserId": - if self.shared_uid is not None: - AddProblem("multiple sharedUserId declarations") - self.shared_uid = m.group(2) - elif name == "package": - if self.package is not None: - AddProblem("multiple package declarations") - self.package = m.group(2) - - if self.package is None: - AddProblem("no package declaration") - - -class TargetFiles(object): - def __init__(self): - self.max_pkg_len = 30 - self.max_fn_len = 20 - - def LoadZipFile(self, filename): - d, z = common.UnzipTemp(filename, '*.apk') - try: - self.apks = {} - self.apks_by_basename = {} - for dirpath, dirnames, filenames in os.walk(d): - for fn in filenames: - if fn.endswith(".apk"): - fullname = os.path.join(dirpath, fn) - displayname = fullname[len(d)+1:] - apk = APK(fullname, displayname) - self.apks[apk.package] = apk - self.apks_by_basename[os.path.basename(apk.filename)] = apk - - self.max_pkg_len = max(self.max_pkg_len, len(apk.package)) - self.max_fn_len = max(self.max_fn_len, len(apk.filename)) - finally: - shutil.rmtree(d) - - self.certmap = common.ReadApkCerts(z) - z.close() - - def CheckSharedUids(self): - """Look for any instances where packages signed with different - certs request the same sharedUserId.""" - apks_by_uid = {} - for apk in self.apks.itervalues(): - if apk.shared_uid: - apks_by_uid.setdefault(apk.shared_uid, []).append(apk) - - for uid in sorted(apks_by_uid.keys()): - apks = apks_by_uid[uid] - for apk in apks[1:]: - if 
apk.certs != apks[0].certs: - break - else: - # all packages have the same set of certs; this uid is fine. - continue - - AddProblem("different cert sets for packages with uid %s" % (uid,)) - - print "uid %s is shared by packages with different cert sets:" % (uid,) - for apk in apks: - print "%-*s [%s]" % (self.max_pkg_len, apk.package, apk.filename) - for cert in apk.certs: - print " ", ALL_CERTS.Get(cert) - print - - def CheckExternalSignatures(self): - for apk_filename, certname in self.certmap.iteritems(): - if certname == "EXTERNAL": - # Apps marked EXTERNAL should be signed with the test key - # during development, then manually re-signed after - # predexopting. Consider it an error if this app is now - # signed with any key that is present in our tree. - apk = self.apks_by_basename[apk_filename] - name = ALL_CERTS.Get(apk.cert) - if not name.startswith("unknown "): - Push(apk.filename) - AddProblem("hasn't been signed with EXTERNAL cert") - Pop() - - def PrintCerts(self): - """Display a table of packages grouped by cert.""" - by_cert = {} - for apk in self.apks.itervalues(): - for cert in apk.certs: - by_cert.setdefault(cert, []).append((apk.package, apk)) - - order = [(-len(v), k) for (k, v) in by_cert.iteritems()] - order.sort() - - for _, cert in order: - print "%s:" % (ALL_CERTS.Get(cert),) - apks = by_cert[cert] - apks.sort() - for _, apk in apks: - if apk.shared_uid: - print " %-*s %-*s [%s]" % (self.max_fn_len, apk.filename, - self.max_pkg_len, apk.package, - apk.shared_uid) - else: - print " %-*s %-*s" % (self.max_fn_len, apk.filename, - self.max_pkg_len, apk.package) - print - - def CompareWith(self, other): - """Look for instances where a given package that exists in both - self and other have different certs.""" - - all = set(self.apks.keys()) - all.update(other.apks.keys()) - - max_pkg_len = max(self.max_pkg_len, other.max_pkg_len) - - by_certpair = {} - - for i in all: - if i in self.apks: - if i in other.apks: - # in both; should have same set 
of certs - if self.apks[i].certs != other.apks[i].certs: - by_certpair.setdefault((other.apks[i].certs, - self.apks[i].certs), []).append(i) - else: - print "%s [%s]: new APK (not in comparison target_files)" % ( - i, self.apks[i].filename) - else: - if i in other.apks: - print "%s [%s]: removed APK (only in comparison target_files)" % ( - i, other.apks[i].filename) - - if by_certpair: - AddProblem("some APKs changed certs") - Banner("APK signing differences") - for (old, new), packages in sorted(by_certpair.items()): - for i, o in enumerate(old): - if i == 0: - print "was", ALL_CERTS.Get(o) - else: - print " ", ALL_CERTS.Get(o) - for i, n in enumerate(new): - if i == 0: - print "now", ALL_CERTS.Get(n) - else: - print " ", ALL_CERTS.Get(n) - for i in sorted(packages): - old_fn = other.apks[i].filename - new_fn = self.apks[i].filename - if old_fn == new_fn: - print " %-*s [%s]" % (max_pkg_len, i, old_fn) - else: - print " %-*s [was: %s; now: %s]" % (max_pkg_len, i, - old_fn, new_fn) - print - - -def main(argv): - def option_handler(o, a): - if o in ("-c", "--compare_with"): - OPTIONS.compare_with = a - elif o in ("-l", "--local_cert_dirs"): - OPTIONS.local_cert_dirs = [i.strip() for i in a.split(",")] - elif o in ("-t", "--text"): - OPTIONS.text = True - else: - return False - return True - - args = common.ParseOptions(argv, __doc__, - extra_opts="c:l:t", - extra_long_opts=["compare_with=", - "local_cert_dirs="], - extra_option_handler=option_handler) - - if len(args) != 1: - common.Usage(__doc__) - sys.exit(1) - - ALL_CERTS.FindLocalCerts() - - Push("input target_files:") - try: - target_files = TargetFiles() - target_files.LoadZipFile(args[0]) - finally: - Pop() - - compare_files = None - if OPTIONS.compare_with: - Push("comparison target_files:") - try: - compare_files = TargetFiles() - compare_files.LoadZipFile(OPTIONS.compare_with) - finally: - Pop() - - if OPTIONS.text or not compare_files: - Banner("target files") - target_files.PrintCerts() - 
target_files.CheckSharedUids() - target_files.CheckExternalSignatures() - if compare_files: - if OPTIONS.text: - Banner("comparison files") - compare_files.PrintCerts() - target_files.CompareWith(compare_files) - - if PROBLEMS: - print "%d problem(s) found:\n" % (len(PROBLEMS),) - for p in PROBLEMS: - print p - return 1 - - return 0 - - -if __name__ == '__main__': - try: - r = main(sys.argv[1:]) - sys.exit(r) - except common.ExternalError, e: - print - print " ERROR: %s" % (e,) - print - sys.exit(1) diff --git a/tools/releasetools/check_target_files_signatures b/tools/releasetools/check_target_files_signatures new file mode 120000 index 0000000000..9f62aa3230 --- /dev/null +++ b/tools/releasetools/check_target_files_signatures @@ -0,0 +1 @@ +check_target_files_signatures.py \ No newline at end of file diff --git a/tools/releasetools/check_target_files_signatures.py b/tools/releasetools/check_target_files_signatures.py new file mode 100755 index 0000000000..dd57033bef --- /dev/null +++ b/tools/releasetools/check_target_files_signatures.py @@ -0,0 +1,442 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Check the signatures of all APKs in a target_files .zip file. With +-c, compare the signatures of each package to the ones in a separate +target_files (usually a previously distributed build for the same +device) and flag any changes. 
+ +Usage: check_target_file_signatures [flags] target_files + + -c (--compare_with) + Look for compatibility problems between the two sets of target + files (eg., packages whose keys have changed). + + -l (--local_cert_dirs) + Comma-separated list of top-level directories to scan for + .x509.pem files. Defaults to "vendor,build". Where cert files + can be found that match APK signatures, the filename will be + printed as the cert name, otherwise a hash of the cert plus its + subject string will be printed instead. + + -t (--text) + Dump the certificate information for both packages in comparison + mode (this output is normally suppressed). + +""" + +import sys + +if sys.hexversion < 0x02070000: + print >> sys.stderr, "Python 2.7 or newer is required." + sys.exit(1) + +import os +import re +import shutil +import subprocess +import zipfile + +import common + +# Work around a bug in python's zipfile module that prevents opening +# of zipfiles if any entry has an extra field of between 1 and 3 bytes +# (which is common with zipaligned APKs). This overrides the +# ZipInfo._decodeExtra() method (which contains the bug) with an empty +# version (since we don't need to decode the extra field anyway). 
+class MyZipInfo(zipfile.ZipInfo): + def _decodeExtra(self): + pass +zipfile.ZipInfo = MyZipInfo + +OPTIONS = common.OPTIONS + +OPTIONS.text = False +OPTIONS.compare_with = None +OPTIONS.local_cert_dirs = ("vendor", "build") + +PROBLEMS = [] +PROBLEM_PREFIX = [] + +def AddProblem(msg): + PROBLEMS.append(" ".join(PROBLEM_PREFIX) + " " + msg) +def Push(msg): + PROBLEM_PREFIX.append(msg) +def Pop(): + PROBLEM_PREFIX.pop() + + +def Banner(msg): + print "-" * 70 + print " ", msg + print "-" * 70 + + +def GetCertSubject(cert): + p = common.Run(["openssl", "x509", "-inform", "DER", "-text"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + out, err = p.communicate(cert) + if err and not err.strip(): + return "(error reading cert subject)" + for line in out.split("\n"): + line = line.strip() + if line.startswith("Subject:"): + return line[8:].strip() + return "(unknown cert subject)" + + +class CertDB(object): + def __init__(self): + self.certs = {} + + def Add(self, cert, name=None): + if cert in self.certs: + if name: + self.certs[cert] = self.certs[cert] + "," + name + else: + if name is None: + name = "unknown cert %s (%s)" % (common.sha1(cert).hexdigest()[:12], + GetCertSubject(cert)) + self.certs[cert] = name + + def Get(self, cert): + """Return the name for a given cert.""" + return self.certs.get(cert, None) + + def FindLocalCerts(self): + to_load = [] + for top in OPTIONS.local_cert_dirs: + for dirpath, _, filenames in os.walk(top): + certs = [os.path.join(dirpath, i) + for i in filenames if i.endswith(".x509.pem")] + if certs: + to_load.extend(certs) + + for i in to_load: + f = open(i) + cert = common.ParseCertificate(f.read()) + f.close() + name, _ = os.path.splitext(i) + name, _ = os.path.splitext(name) + self.Add(cert, name) + +ALL_CERTS = CertDB() + + +def CertFromPKCS7(data, filename): + """Read the cert out of a PKCS#7-format file (which is what is + stored in a signed .apk).""" + Push(filename + ":") + try: + p = common.Run(["openssl", "pkcs7", + 
"-inform", "DER", + "-outform", "PEM", + "-print_certs"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + out, err = p.communicate(data) + if err and not err.strip(): + AddProblem("error reading cert:\n" + err) + return None + + cert = common.ParseCertificate(out) + if not cert: + AddProblem("error parsing cert output") + return None + return cert + finally: + Pop() + + +class APK(object): + def __init__(self, full_filename, filename): + self.filename = filename + self.certs = None + self.shared_uid = None + self.package = None + + Push(filename+":") + try: + self.RecordCerts(full_filename) + self.ReadManifest(full_filename) + finally: + Pop() + + def RecordCerts(self, full_filename): + out = set() + try: + f = open(full_filename) + apk = zipfile.ZipFile(f, "r") + pkcs7 = None + for info in apk.infolist(): + if info.filename.startswith("META-INF/") and \ + (info.filename.endswith(".DSA") or info.filename.endswith(".RSA")): + pkcs7 = apk.read(info.filename) + cert = CertFromPKCS7(pkcs7, info.filename) + out.add(cert) + ALL_CERTS.Add(cert) + if not pkcs7: + AddProblem("no signature") + finally: + f.close() + self.certs = frozenset(out) + + def ReadManifest(self, full_filename): + p = common.Run(["aapt", "dump", "xmltree", full_filename, + "AndroidManifest.xml"], + stdout=subprocess.PIPE) + manifest, err = p.communicate() + if err: + AddProblem("failed to read manifest") + return + + self.shared_uid = None + self.package = None + + for line in manifest.split("\n"): + line = line.strip() + m = re.search(r'A: (\S*?)(?:\(0x[0-9a-f]+\))?="(.*?)" \(Raw', line) + if m: + name = m.group(1) + if name == "android:sharedUserId": + if self.shared_uid is not None: + AddProblem("multiple sharedUserId declarations") + self.shared_uid = m.group(2) + elif name == "package": + if self.package is not None: + AddProblem("multiple package declarations") + self.package = m.group(2) + + if self.package is None: + AddProblem("no package declaration") + + +class TargetFiles(object): + 
def __init__(self): + self.max_pkg_len = 30 + self.max_fn_len = 20 + self.apks = None + self.apks_by_basename = None + self.certmap = None + + def LoadZipFile(self, filename): + d, z = common.UnzipTemp(filename, '*.apk') + try: + self.apks = {} + self.apks_by_basename = {} + for dirpath, _, filenames in os.walk(d): + for fn in filenames: + if fn.endswith(".apk"): + fullname = os.path.join(dirpath, fn) + displayname = fullname[len(d)+1:] + apk = APK(fullname, displayname) + self.apks[apk.package] = apk + self.apks_by_basename[os.path.basename(apk.filename)] = apk + + self.max_pkg_len = max(self.max_pkg_len, len(apk.package)) + self.max_fn_len = max(self.max_fn_len, len(apk.filename)) + finally: + shutil.rmtree(d) + + self.certmap = common.ReadApkCerts(z) + z.close() + + def CheckSharedUids(self): + """Look for any instances where packages signed with different + certs request the same sharedUserId.""" + apks_by_uid = {} + for apk in self.apks.itervalues(): + if apk.shared_uid: + apks_by_uid.setdefault(apk.shared_uid, []).append(apk) + + for uid in sorted(apks_by_uid.keys()): + apks = apks_by_uid[uid] + for apk in apks[1:]: + if apk.certs != apks[0].certs: + break + else: + # all packages have the same set of certs; this uid is fine. + continue + + AddProblem("different cert sets for packages with uid %s" % (uid,)) + + print "uid %s is shared by packages with different cert sets:" % (uid,) + for apk in apks: + print "%-*s [%s]" % (self.max_pkg_len, apk.package, apk.filename) + for cert in apk.certs: + print " ", ALL_CERTS.Get(cert) + print + + def CheckExternalSignatures(self): + for apk_filename, certname in self.certmap.iteritems(): + if certname == "EXTERNAL": + # Apps marked EXTERNAL should be signed with the test key + # during development, then manually re-signed after + # predexopting. Consider it an error if this app is now + # signed with any key that is present in our tree. 
+ apk = self.apks_by_basename[apk_filename]
+ name = ALL_CERTS.Get(apk.cert)
+ if not name.startswith("unknown "):
+ Push(apk.filename)
+ AddProblem("hasn't been signed with EXTERNAL cert")
+ Pop()
+
+ def PrintCerts(self):
+ """Display a table of packages grouped by cert."""
+ by_cert = {}
+ for apk in self.apks.itervalues():
+ for cert in apk.certs:
+ by_cert.setdefault(cert, []).append((apk.package, apk))
+
+ order = [(-len(v), k) for (k, v) in by_cert.iteritems()]
+ order.sort()
+
+ for _, cert in order:
+ print "%s:" % (ALL_CERTS.Get(cert),)
+ apks = by_cert[cert]
+ apks.sort()
+ for _, apk in apks:
+ if apk.shared_uid:
+ print " %-*s %-*s [%s]" % (self.max_fn_len, apk.filename,
+ self.max_pkg_len, apk.package,
+ apk.shared_uid)
+ else:
+ print " %-*s %-*s" % (self.max_fn_len, apk.filename,
+ self.max_pkg_len, apk.package)
+ print
+
+ def CompareWith(self, other):
+ """Look for instances where a given package that exists in both
+ self and other have different certs."""
+
+ all_apks = set(self.apks.keys())
+ all_apks.update(other.apks.keys())
+
+ max_pkg_len = max(self.max_pkg_len, other.max_pkg_len)
+
+ by_certpair = {}
+
+ for i in all_apks:
+ if i in self.apks:
+ if i in other.apks:
+ # in both; should have same set of certs
+ if self.apks[i].certs != other.apks[i].certs:
+ by_certpair.setdefault((other.apks[i].certs,
+ self.apks[i].certs), []).append(i)
+ else:
+ print "%s [%s]: new APK (not in comparison target_files)" % (
+ i, self.apks[i].filename)
+ else:
+ if i in other.apks:
+ print "%s [%s]: removed APK (only in comparison target_files)" % (
+ i, other.apks[i].filename)
+
+ if by_certpair:
+ AddProblem("some APKs changed certs")
+ Banner("APK signing differences")
+ for (old, new), packages in sorted(by_certpair.items()):
+ for i, o in enumerate(old):
+ if i == 0:
+ print "was", ALL_CERTS.Get(o)
+ else:
+ print " ", ALL_CERTS.Get(o)
+ for i, n in enumerate(new):
+ if i == 0:
+ print "now", ALL_CERTS.Get(n)
+ else:
+ print " ", ALL_CERTS.Get(n)
+ for i in 
sorted(packages): + old_fn = other.apks[i].filename + new_fn = self.apks[i].filename + if old_fn == new_fn: + print " %-*s [%s]" % (max_pkg_len, i, old_fn) + else: + print " %-*s [was: %s; now: %s]" % (max_pkg_len, i, + old_fn, new_fn) + print + + +def main(argv): + def option_handler(o, a): + if o in ("-c", "--compare_with"): + OPTIONS.compare_with = a + elif o in ("-l", "--local_cert_dirs"): + OPTIONS.local_cert_dirs = [i.strip() for i in a.split(",")] + elif o in ("-t", "--text"): + OPTIONS.text = True + else: + return False + return True + + args = common.ParseOptions(argv, __doc__, + extra_opts="c:l:t", + extra_long_opts=["compare_with=", + "local_cert_dirs="], + extra_option_handler=option_handler) + + if len(args) != 1: + common.Usage(__doc__) + sys.exit(1) + + ALL_CERTS.FindLocalCerts() + + Push("input target_files:") + try: + target_files = TargetFiles() + target_files.LoadZipFile(args[0]) + finally: + Pop() + + compare_files = None + if OPTIONS.compare_with: + Push("comparison target_files:") + try: + compare_files = TargetFiles() + compare_files.LoadZipFile(OPTIONS.compare_with) + finally: + Pop() + + if OPTIONS.text or not compare_files: + Banner("target files") + target_files.PrintCerts() + target_files.CheckSharedUids() + target_files.CheckExternalSignatures() + if compare_files: + if OPTIONS.text: + Banner("comparison files") + compare_files.PrintCerts() + target_files.CompareWith(compare_files) + + if PROBLEMS: + print "%d problem(s) found:\n" % (len(PROBLEMS),) + for p in PROBLEMS: + print p + return 1 + + return 0 + + +if __name__ == '__main__': + try: + r = main(sys.argv[1:]) + sys.exit(r) + except common.ExternalError as e: + print + print " ERROR: %s" % (e,) + print + sys.exit(1) diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index 6903dc6626..63e438af81 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -30,44 +30,45 @@ import time import zipfile import blockimgdiff -from rangelib import * 
+import rangelib try: from hashlib import sha1 as sha1 except ImportError: from sha import sha as sha1 -# missing in Python 2.4 and before -if not hasattr(os, "SEEK_SET"): - os.SEEK_SET = 0 -class Options(object): pass -OPTIONS = Options() - -DEFAULT_SEARCH_PATH_BY_PLATFORM = { - "linux2": "out/host/linux-x86", - "darwin": "out/host/darwin-x86", +class Options(object): + def __init__(self): + platform_search_path = { + "linux2": "out/host/linux-x86", + "darwin": "out/host/darwin-x86", } -OPTIONS.search_path = DEFAULT_SEARCH_PATH_BY_PLATFORM.get(sys.platform, None) -OPTIONS.signapk_path = "framework/signapk.jar" # Relative to search_path -OPTIONS.extra_signapk_args = [] -OPTIONS.java_path = "java" # Use the one on the path by default. -OPTIONS.java_args = "-Xmx2048m" # JVM Args -OPTIONS.public_key_suffix = ".x509.pem" -OPTIONS.private_key_suffix = ".pk8" -OPTIONS.verbose = False -OPTIONS.tempfiles = [] -OPTIONS.device_specific = None -OPTIONS.extras = {} -OPTIONS.info_dict = None + self.search_path = platform_search_path.get(sys.platform, None) + self.signapk_path = "framework/signapk.jar" # Relative to search_path + self.extra_signapk_args = [] + self.java_path = "java" # Use the one on the path by default. + self.java_args = "-Xmx2048m" # JVM Args + self.public_key_suffix = ".x509.pem" + self.private_key_suffix = ".pk8" + self.verbose = False + self.tempfiles = [] + self.device_specific = None + self.extras = {} + self.info_dict = None + self.worker_threads = None + + +OPTIONS = Options() # Values for "certificate" in apkcerts that mean special things. 
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL") -class ExternalError(RuntimeError): pass +class ExternalError(RuntimeError): + pass def Run(args, **kwargs): @@ -94,19 +95,19 @@ def CloseInheritedPipes(): pass -def LoadInfoDict(input): +def LoadInfoDict(input_file): """Read and parse the META/misc_info.txt key/value pairs from the input target files and return a dict.""" def read_helper(fn): - if isinstance(input, zipfile.ZipFile): - return input.read(fn) + if isinstance(input_file, zipfile.ZipFile): + return input_file.read(fn) else: - path = os.path.join(input, *fn.split("/")) + path = os.path.join(input_file, *fn.split("/")) try: with open(path) as f: return f.read() - except IOError, e: + except IOError as e: if e.errno == errno.ENOENT: raise KeyError(fn) d = {} @@ -122,14 +123,16 @@ def LoadInfoDict(input): if "mkyaffs2_extra_flags" not in d: try: - d["mkyaffs2_extra_flags"] = read_helper("META/mkyaffs2-extra-flags.txt").strip() + d["mkyaffs2_extra_flags"] = read_helper( + "META/mkyaffs2-extra-flags.txt").strip() except KeyError: # ok if flags don't exist pass if "recovery_api_version" not in d: try: - d["recovery_api_version"] = read_helper("META/recovery-api-version.txt").strip() + d["recovery_api_version"] = read_helper( + "META/recovery-api-version.txt").strip() except KeyError: raise ValueError("can't find recovery API version in input target-files") @@ -146,9 +149,11 @@ def LoadInfoDict(input): try: data = read_helper("META/imagesizes.txt") for line in data.split("\n"): - if not line: continue + if not line: + continue name, value = line.split(" ", 1) - if not value: continue + if not value: + continue if name == "blocksize": d[name] = value else: @@ -186,7 +191,8 @@ def LoadDictionaryFromLines(lines): d = {} for line in lines: line = line.strip() - if not line or line.startswith("#"): continue + if not line or line.startswith("#"): + continue if "=" in line: name, value = line.split("=", 1) d[name] = value @@ -194,7 +200,12 @@ def 
LoadDictionaryFromLines(lines): def LoadRecoveryFSTab(read_helper, fstab_version): class Partition(object): - pass + def __init__(self, mount_point, fs_type, device, length, device2): + self.mount_point = mount_point + self.fs_type = fs_type + self.device = device + self.length = length + self.device2 = device2 try: data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab") @@ -206,68 +217,65 @@ def LoadRecoveryFSTab(read_helper, fstab_version): d = {} for line in data.split("\n"): line = line.strip() - if not line or line.startswith("#"): continue + if not line or line.startswith("#"): + continue pieces = line.split() - if not (3 <= len(pieces) <= 4): + if not 3 <= len(pieces) <= 4: raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,)) - - p = Partition() - p.mount_point = pieces[0] - p.fs_type = pieces[1] - p.device = pieces[2] - p.length = 0 options = None if len(pieces) >= 4: if pieces[3].startswith("/"): - p.device2 = pieces[3] + device2 = pieces[3] if len(pieces) >= 5: options = pieces[4] else: - p.device2 = None + device2 = None options = pieces[3] else: - p.device2 = None + device2 = None + mount_point = pieces[0] + length = 0 if options: options = options.split(",") for i in options: if i.startswith("length="): - p.length = int(i[7:]) + length = int(i[7:]) else: - print "%s: unknown option \"%s\"" % (p.mount_point, i) + print "%s: unknown option \"%s\"" % (mount_point, i) - d[p.mount_point] = p + d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1], + device=pieces[2], length=length, + device2=device2) elif fstab_version == 2: d = {} for line in data.split("\n"): line = line.strip() - if not line or line.startswith("#"): continue + if not line or line.startswith("#"): + continue pieces = line.split() if len(pieces) != 5: raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,)) # Ignore entries that are managed by vold options = pieces[4] - if "voldmanaged=" in options: continue + if "voldmanaged=" in options: + 
continue # It's a good line, parse it - p = Partition() - p.device = pieces[0] - p.mount_point = pieces[1] - p.fs_type = pieces[2] - p.device2 = None - p.length = 0 - + length = 0 options = options.split(",") for i in options: if i.startswith("length="): - p.length = int(i[7:]) + length = int(i[7:]) else: # Ignore all unknown options in the unified fstab continue - d[p.mount_point] = p + mount_point = pieces[1] + d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2], + device=pieces[0], length=length, device2=None) else: raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,)) @@ -279,6 +287,7 @@ def DumpInfoDict(d): for k, v in sorted(d.items()): print "%-25s = (%s) %s" % (k, type(v).__name__, v) + def BuildBootableImage(sourcedir, fs_config_file, info_dict=None): """Take a kernel, cmdline, and ramdisk directory from the input (in 'sourcedir'), and turn them into a boot image. Return the image @@ -305,8 +314,8 @@ def BuildBootableImage(sourcedir, fs_config_file, info_dict=None): p2.wait() p1.wait() - assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (targetname,) - assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (targetname,) + assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,) + assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,) # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg" @@ -347,7 +356,8 @@ def BuildBootableImage(sourcedir, fs_config_file, info_dict=None): if info_dict.get("verity_key", None): path = "/" + os.path.basename(sourcedir).lower() - cmd = ["boot_signer", path, img.name, info_dict["verity_key"] + ".pk8", info_dict["verity_key"] + ".x509.pem", img.name] + cmd = ["boot_signer", path, img.name, info_dict["verity_key"] + ".pk8", + info_dict["verity_key"] + ".x509.pem", img.name] p = Run(cmd, stdout=subprocess.PIPE) p.communicate() assert p.returncode == 0, "boot_signer of %s image failed" % 
path @@ -453,7 +463,7 @@ def GetKeyPasswords(keylist): stdin=devnull.fileno(), stdout=devnull.fileno(), stderr=subprocess.PIPE) - stdout, stderr = p.communicate() + _, stderr = p.communicate() if p.returncode == 0: # Encrypted key with empty string as password. key_passwords[k] = '' @@ -524,20 +534,23 @@ def CheckSize(data, target, info_dict): any, for the given target. Raise exception if the data is too big. Print a warning if the data is nearing the maximum size.""" - if target.endswith(".img"): target = target[:-4] + if target.endswith(".img"): + target = target[:-4] mount_point = "/" + target fs_type = None limit = None if info_dict["fstab"]: - if mount_point == "/userdata": mount_point = "/data" + if mount_point == "/userdata": + mount_point = "/data" p = info_dict["fstab"][mount_point] fs_type = p.fs_type device = p.device if "/" in device: device = device[device.rfind("/")+1:] limit = info_dict.get(device + "_size", None) - if not fs_type or not limit: return + if not fs_type or not limit: + return if fs_type == "yaffs2": # image size should be increased by 1/64th to account for the @@ -562,7 +575,8 @@ def ReadApkCerts(tf_zip): certmap = {} for line in tf_zip.read("META/apkcerts.txt").split("\n"): line = line.strip() - if not line: continue + if not line: + continue m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+' r'private_key="(.*)"$', line) if m: @@ -622,13 +636,11 @@ def ParseOptions(argv, "java_path=", "java_args=", "public_key_suffix=", "private_key_suffix=", "device_specific=", "extra="] + list(extra_long_opts)) - except getopt.GetoptError, err: + except getopt.GetoptError as err: Usage(docstring) print "**", str(err), "**" sys.exit(2) - path_specified = False - for o, a in opts: if o in ("-h", "--help"): Usage(docstring) @@ -707,7 +719,8 @@ class PasswordManager(object): if i not in current or not current[i]: missing.append(i) # Are all the passwords already in the file? 
- if not missing: return current + if not missing: + return current for i in missing: current[i] = "" @@ -721,7 +734,7 @@ class PasswordManager(object): current = self.UpdateAndReadFile(current) - def PromptResult(self, current): + def PromptResult(self, current): # pylint: disable=no-self-use """Prompt the user to enter a value (password) for each key in 'current' whose value is fales. Returns a new dict with all the values. @@ -732,9 +745,10 @@ class PasswordManager(object): result[k] = v else: while True: - result[k] = getpass.getpass("Enter password for %s key> " - % (k,)).strip() - if result[k]: break + result[k] = getpass.getpass( + "Enter password for %s key> " % k).strip() + if result[k]: + break return result def UpdateAndReadFile(self, current): @@ -742,14 +756,13 @@ class PasswordManager(object): return self.PromptResult(current) f = open(self.pwfile, "w") - os.chmod(self.pwfile, 0600) + os.chmod(self.pwfile, 0o600) f.write("# Enter key passwords between the [[[ ]]] brackets.\n") f.write("# (Additional spaces are harmless.)\n\n") first_line = None - sorted = [(not v, k, v) for (k, v) in current.iteritems()] - sorted.sort() - for i, (_, k, v) in enumerate(sorted): + sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()]) + for i, (_, k, v) in enumerate(sorted_list): f.write("[[[ %s ]]] %s\n" % (v, k)) if not v and first_line is None: # position cursor on first line with no password. 
@@ -763,19 +776,21 @@ class PasswordManager(object): def ReadFile(self): result = {} - if self.pwfile is None: return result + if self.pwfile is None: + return result try: f = open(self.pwfile, "r") for line in f: line = line.strip() - if not line or line[0] == '#': continue + if not line or line[0] == '#': + continue m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line) if not m: print "failed to parse password file: ", line else: result[m.group(2)] = m.group(1) f.close() - except IOError, e: + except IOError as e: if e.errno != errno.ENOENT: print "error reading password file: ", str(e) return result @@ -821,16 +836,16 @@ def ZipWrite(zip_file, filename, arcname=None, perms=0o644, zipfile.ZIP64_LIMIT = saved_zip64_limit -def ZipWriteStr(zip, filename, data, perms=0644, compression=None): +def ZipWriteStr(zip_file, filename, data, perms=0o644, compression=None): # use a fixed timestamp so the output is repeatable. zinfo = zipfile.ZipInfo(filename=filename, date_time=(2009, 1, 1, 0, 0, 0)) if compression is None: - zinfo.compress_type = zip.compression + zinfo.compress_type = zip_file.compression else: zinfo.compress_type = compression zinfo.external_attr = perms << 16 - zip.writestr(zinfo, data) + zip_file.writestr(zinfo, data) class DeviceSpecificParams(object): @@ -845,7 +860,8 @@ class DeviceSpecificParams(object): if self.module is None: path = OPTIONS.device_specific - if not path: return + if not path: + return try: if os.path.isdir(path): info = imp.find_module("releasetools", [path]) @@ -983,7 +999,8 @@ class Difference(object): err = [] def run(): _, e = p.communicate() - if e: err.append(e) + if e: + err.append(e) th = threading.Thread(target=run) th.start() th.join(timeout=300) # 5 mins @@ -1050,7 +1067,7 @@ def ComputeDifferences(diffs): print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % ( dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name) lock.release() - except Exception, e: + except Exception as e: print e raise @@ -1063,8 +1080,9 @@ def 
ComputeDifferences(diffs): threads.pop().join() -class BlockDifference: - def __init__(self, partition, tgt, src=None, check_first_block=False, version=None): +class BlockDifference(object): + def __init__(self, partition, tgt, src=None, check_first_block=False, + version=None): self.tgt = tgt self.src = src self.partition = partition @@ -1094,7 +1112,8 @@ class BlockDifference: else: script.Print("Patching %s image after verification." % (self.partition,)) - if progress: script.ShowProgress(progress, 0) + if progress: + script.ShowProgress(progress, 0) self._WriteUpdate(script, output_zip) def WriteVerifyScript(self, script): @@ -1108,11 +1127,11 @@ class BlockDifference: '"%s.new.dat", "%s.patch.dat") then') % (self.device, partition, partition, partition)) else: - script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % - (self.device, self.src.care_map.to_string_raw(), - self.src.TotalSha1())) + script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % ( + self.device, self.src.care_map.to_string_raw(), + self.src.TotalSha1())) script.Print('Verified %s image...' 
% (partition,)) - script.AppendExtra('else'); + script.AppendExtra('else') # When generating incrementals for the system and vendor partitions, # explicitly check the first block (which contains the superblock) of @@ -1147,9 +1166,9 @@ class BlockDifference: 'package_extract_file("{partition}.transfer.list"), ' '"{partition}.new.dat", "{partition}.patch.dat");\n'.format( device=self.device, partition=self.partition)) - script.AppendExtra(script._WordWrap(call)) + script.AppendExtra(script.WordWrap(call)) - def _HashBlocks(self, source, ranges): + def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use data = source.ReadRangeSet(ranges) ctx = sha1() @@ -1159,8 +1178,8 @@ class BlockDifference: return ctx.hexdigest() def _CheckFirstBlock(self, script): - r = RangeSet((0, 1)) - srchash = self._HashBlocks(self.src, r); + r = rangelib.RangeSet((0, 1)) + srchash = self._HashBlocks(self.src, r) script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || ' 'abort("%s has been remounted R/W; ' @@ -1172,16 +1191,21 @@ DataImage = blockimgdiff.DataImage # map recovery.fstab's fs_types to mount/format "partition types" -PARTITION_TYPES = { "yaffs2": "MTD", "mtd": "MTD", - "ext4": "EMMC", "emmc": "EMMC", - "f2fs": "EMMC" } +PARTITION_TYPES = { + "yaffs2": "MTD", + "mtd": "MTD", + "ext4": "EMMC", + "emmc": "EMMC", + "f2fs": "EMMC" +} def GetTypeAndDevice(mount_point, info): fstab = info["fstab"] if fstab: - return PARTITION_TYPES[fstab[mount_point].fs_type], fstab[mount_point].device + return (PARTITION_TYPES[fstab[mount_point].fs_type], + fstab[mount_point].device) else: - return None + raise KeyError def ParseCertificate(data): @@ -1243,16 +1267,15 @@ if ! 
applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(rec else log -t recovery "Recovery image already installed" fi -""" % { 'boot_size': boot_img.size, - 'boot_sha1': boot_img.sha1, - 'recovery_size': recovery_img.size, - 'recovery_sha1': recovery_img.sha1, - 'boot_type': boot_type, - 'boot_device': boot_device, - 'recovery_type': recovery_type, - 'recovery_device': recovery_device, - 'bonus_args': bonus_args, - } +""" % {'boot_size': boot_img.size, + 'boot_sha1': boot_img.sha1, + 'recovery_size': recovery_img.size, + 'recovery_sha1': recovery_img.sha1, + 'boot_type': boot_type, + 'boot_device': boot_device, + 'recovery_type': recovery_type, + 'recovery_device': recovery_device, + 'bonus_args': bonus_args} # The install script location moved from /system/etc to /system/bin # in the L release. Parse the init.rc file to find out where the @@ -1261,12 +1284,12 @@ fi try: with open(os.path.join(input_dir, "BOOT", "RAMDISK", "init.rc")) as f: for line in f: - m = re.match("^service flash_recovery /system/(\S+)\s*$", line) + m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line) if m: sh_location = m.group(1) print "putting script in", sh_location break - except (OSError, IOError), e: + except (OSError, IOError) as e: print "failed to read init.rc: %s" % (e,) output_sink(sh_location, sh) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index 934d751f48..3d0da88fe5 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os import re import common @@ -36,7 +35,7 @@ class EdifyGenerator(object): return x @staticmethod - def _WordWrap(cmd, linelen=80): + def WordWrap(cmd, linelen=80): """'cmd' should be a function call with null characters after each parameter (eg, "somefun(foo,\0bar,\0baz)"). 
This function wraps cmd to a given line length, replacing nulls with spaces and/or newlines @@ -77,32 +76,30 @@ class EdifyGenerator(object): cmd = ('file_getprop("/oem/oem.prop", "{name}") == "{value}" || ' 'abort("This package expects the value \\"{value}\\" for ' '\\"{name}\\" on the OEM partition; this has value \\"" + ' - 'file_getprop("/oem/oem.prop", "{name}") + "\\".");' - ).format(name=name, value=value) + 'file_getprop("/oem/oem.prop", "{name}") + "\\".");').format( + name=name, value=value) self.script.append(cmd) def AssertSomeFingerprint(self, *fp): """Assert that the current recovery build fingerprint is one of *fp.""" if not fp: raise ValueError("must specify some fingerprints") - cmd = ( - ' ||\n '.join([('getprop("ro.build.fingerprint") == "%s"') - % i for i in fp]) + + cmd = (' ||\n '.join([('getprop("ro.build.fingerprint") == "%s"') % i + for i in fp]) + ' ||\n abort("Package expects build fingerprint of %s; this ' - 'device has " + getprop("ro.build.fingerprint") + ".");' - ) % (" or ".join(fp),) + 'device has " + getprop("ro.build.fingerprint") + ".");') % ( + " or ".join(fp)) self.script.append(cmd) def AssertSomeThumbprint(self, *fp): """Assert that the current recovery build thumbprint is one of *fp.""" if not fp: raise ValueError("must specify some thumbprints") - cmd = ( - ' ||\n '.join([('getprop("ro.build.thumbprint") == "%s"') - % i for i in fp]) + + cmd = (' ||\n '.join([('getprop("ro.build.thumbprint") == "%s"') % i + for i in fp]) + ' ||\n abort("Package expects build thumbprint of %s; this ' - 'device has " + getprop("ro.build.thumbprint") + ".");' - ) % (" or ".join(fp),) + 'device has " + getprop("ro.build.thumbprint") + ".");') % ( + " or ".join(fp)) self.script.append(cmd) def AssertOlderBuild(self, timestamp, timestamp_text): @@ -111,15 +108,15 @@ class EdifyGenerator(object): self.script.append( ('(!less_than_int(%s, getprop("ro.build.date.utc"))) || ' 'abort("Can\'t install this package (%s) over newer ' - 'build (" + 
getprop("ro.build.date") + ").");' - ) % (timestamp, timestamp_text)) + 'build (" + getprop("ro.build.date") + ").");') % (timestamp, + timestamp_text)) def AssertDevice(self, device): """Assert that the device identifier is the given string.""" cmd = ('getprop("ro.product.device") == "%s" || ' 'abort("This package is for \\"%s\\" devices; ' - 'this is a \\"" + getprop("ro.product.device") + "\\".");' - ) % (device, device) + 'this is a \\"" + getprop("ro.product.device") + "\\".");') % ( + device, device) self.script.append(cmd) def AssertSomeBootloader(self, *bootloaders): @@ -128,7 +125,7 @@ class EdifyGenerator(object): " ||\0".join(['getprop("ro.bootloader") == "%s"' % (b,) for b in bootloaders]) + ");") - self.script.append(self._WordWrap(cmd)) + self.script.append(self.WordWrap(cmd)) def ShowProgress(self, frac, dur): """Update the progress bar, advancing it over 'frac' over the next @@ -180,9 +177,9 @@ class EdifyGenerator(object): if "=" in option: key, value = option.split("=", 1) mount_dict[key] = value - self.script.append('mount("%s", "%s", "%s", "%s", "%s");' % - (p.fs_type, common.PARTITION_TYPES[p.fs_type], - p.device, p.mount_point, mount_dict.get(p.fs_type, ""))) + self.script.append('mount("%s", "%s", "%s", "%s", "%s");' % ( + p.fs_type, common.PARTITION_TYPES[p.fs_type], p.device, + p.mount_point, mount_dict.get(p.fs_type, ""))) self.mounts.add(p.mount_point) def UnpackPackageDir(self, src, dst): @@ -205,18 +202,17 @@ class EdifyGenerator(object): fstab = self.info.get("fstab", None) if fstab: p = fstab[partition] - if (p.fs_type not in ( "ext2", "ext3", "ext4")): + if p.fs_type not in ("ext2", "ext3", "ext4"): raise ValueError("Partition %s cannot be tuned\n" % (partition,)) - self.script.append('tune2fs(' + - "".join(['"%s", ' % (i,) for i in options]) + - '"%s") || abort("Failed to tune partition %s");' - % ( p.device,partition)); + self.script.append( + 'tune2fs(' + "".join(['"%s", ' % (i,) for i in options]) + + '"%s") || abort("Failed to 
tune partition %s");' % ( + p.device, partition)) def FormatPartition(self, partition): """Format the given partition, specified by its mount point (eg, "/system").""" - reserve_size = 0 fstab = self.info.get("fstab", None) if fstab: p = fstab[partition] @@ -235,9 +231,10 @@ class EdifyGenerator(object): def DeleteFiles(self, file_list): """Delete all files in file_list.""" - if not file_list: return + if not file_list: + return cmd = "delete(" + ",\0".join(['"%s"' % (i,) for i in file_list]) + ");" - self.script.append(self._WordWrap(cmd)) + self.script.append(self.WordWrap(cmd)) def RenameFile(self, srcfile, tgtfile): """Moves a file from one location to another.""" @@ -251,7 +248,7 @@ class EdifyGenerator(object): skip the action if the file exists. Used when a patch is later renamed.""" cmd = ('sha1_check(read_file("%s"), %s) || ' % (tgtfile, tgtsha1)) - self.script.append(self._WordWrap(cmd)) + self.script.append(self.WordWrap(cmd)) def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs): """Apply binary patches (in *patchpairs) to the given srcfile to @@ -265,7 +262,7 @@ class EdifyGenerator(object): cmd.append(',\0%s, package_extract_file("%s")' % patchpairs[i:i+2]) cmd.append(');') cmd = "".join(cmd) - self.script.append(self._WordWrap(cmd)) + self.script.append(self.WordWrap(cmd)) def WriteRawImage(self, mount_point, fn, mapfn=None): """Write the given package file into the partition for the given @@ -289,33 +286,37 @@ class EdifyGenerator(object): self.script.append( 'package_extract_file("%(fn)s", "%(device)s");' % args) else: - raise ValueError("don't know how to write \"%s\" partitions" % (p.fs_type,)) + raise ValueError( + "don't know how to write \"%s\" partitions" % p.fs_type) def SetPermissions(self, fn, uid, gid, mode, selabel, capabilities): """Set file ownership and permissions.""" if not self.info.get("use_set_metadata", False): self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn)) else: - if capabilities is 
None: capabilities = "0x0" + if capabilities is None: + capabilities = "0x0" cmd = 'set_metadata("%s", "uid", %d, "gid", %d, "mode", 0%o, ' \ '"capabilities", %s' % (fn, uid, gid, mode, capabilities) if selabel is not None: - cmd += ', "selabel", "%s"' % ( selabel ) + cmd += ', "selabel", "%s"' % selabel cmd += ');' self.script.append(cmd) - def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode, selabel, capabilities): + def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode, selabel, + capabilities): """Recursively set path ownership and permissions.""" if not self.info.get("use_set_metadata", False): self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");' % (uid, gid, dmode, fmode, fn)) else: - if capabilities is None: capabilities = "0x0" + if capabilities is None: + capabilities = "0x0" cmd = 'set_metadata_recursive("%s", "uid", %d, "gid", %d, ' \ '"dmode", 0%o, "fmode", 0%o, "capabilities", %s' \ % (fn, uid, gid, dmode, fmode, capabilities) if selabel is not None: - cmd += ', "selabel", "%s"' % ( selabel ) + cmd += ', "selabel", "%s"' % selabel cmd += ');' self.script.append(cmd) @@ -328,15 +329,15 @@ class EdifyGenerator(object): for dest, links in sorted(by_dest.iteritems()): cmd = ('symlink("%s", ' % (dest,) + ",\0".join(['"' + i + '"' for i in sorted(links)]) + ");") - self.script.append(self._WordWrap(cmd)) + self.script.append(self.WordWrap(cmd)) def AppendExtra(self, extra): """Append text verbatim to the output script.""" self.script.append(extra) def Unmount(self, mount_point): - self.script.append('unmount("%s");' % (mount_point,)) - self.mounts.remove(mount_point); + self.script.append('unmount("%s");' % mount_point) + self.mounts.remove(mount_point) def UnmountAll(self): for p in sorted(self.mounts): @@ -359,4 +360,4 @@ class EdifyGenerator(object): else: data = open(input_path, "rb").read() common.ZipWriteStr(output_zip, "META-INF/com/google/android/update-binary", - data, perms=0755) + data, perms=0o755) diff --git 
a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py index 4dda0b7520..8c5acd882c 100755 --- a/tools/releasetools/img_from_target_files.py +++ b/tools/releasetools/img_from_target_files.py @@ -32,18 +32,10 @@ if sys.hexversion < 0x02070000: print >> sys.stderr, "Python 2.7 or newer is required." sys.exit(1) -import errno import os -import re import shutil -import subprocess -import tempfile import zipfile -# missing in Python 2.4 and before -if not hasattr(os, "SEEK_SET"): - os.SEEK_SET = 0 - import common OPTIONS = common.OPTIONS @@ -58,7 +50,7 @@ def CopyInfo(output_zip): def main(argv): bootable_only = [False] - def option_handler(o, a): + def option_handler(o, _): if o in ("-z", "--bootable_zip"): bootable_only[0] = True else: @@ -116,7 +108,7 @@ def main(argv): boot_image = common.GetBootableImage( "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT") if boot_image: - boot_image.AddToZip(output_zip) + boot_image.AddToZip(output_zip) recovery_image = common.GetBootableImage( "recovery.img", "recovery.img", OPTIONS.input_tmp, "RECOVERY") if recovery_image: @@ -157,7 +149,7 @@ if __name__ == '__main__': try: common.CloseInheritedPipes() main(sys.argv[1:]) - except common.ExternalError, e: + except common.ExternalError as e: print print " ERROR: %s" % (e,) print diff --git a/tools/releasetools/make_recovery_patch b/tools/releasetools/make_recovery_patch deleted file mode 100755 index 08d1450088..0000000000 --- a/tools/releasetools/make_recovery_patch +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (C) 2014 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -if sys.hexversion < 0x02070000: - print >> sys.stderr, "Python 2.7 or newer is required." - sys.exit(1) - -import os -import common - -OPTIONS = common.OPTIONS - -def main(argv): - # def option_handler(o, a): - # return False - - args = common.ParseOptions(argv, __doc__) - input_dir, output_dir = args - - OPTIONS.info_dict = common.LoadInfoDict(input_dir) - - recovery_img = common.GetBootableImage("recovery.img", "recovery.img", - input_dir, "RECOVERY") - boot_img = common.GetBootableImage("boot.img", "boot.img", - input_dir, "BOOT") - - if not recovery_img or not boot_img: - sys.exit(0) - - def output_sink(fn, data): - with open(os.path.join(output_dir, "SYSTEM", *fn.split("/")), "wb") as f: - f.write(data) - - common.MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img) - - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/tools/releasetools/make_recovery_patch b/tools/releasetools/make_recovery_patch new file mode 120000 index 0000000000..45cec0862e --- /dev/null +++ b/tools/releasetools/make_recovery_patch @@ -0,0 +1 @@ +make_recovery_patch.py \ No newline at end of file diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py new file mode 100755 index 0000000000..08d1450088 --- /dev/null +++ b/tools/releasetools/make_recovery_patch.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# +# Copyright (C) 2014 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +if sys.hexversion < 0x02070000: + print >> sys.stderr, "Python 2.7 or newer is required." + sys.exit(1) + +import os +import common + +OPTIONS = common.OPTIONS + +def main(argv): + # def option_handler(o, a): + # return False + + args = common.ParseOptions(argv, __doc__) + input_dir, output_dir = args + + OPTIONS.info_dict = common.LoadInfoDict(input_dir) + + recovery_img = common.GetBootableImage("recovery.img", "recovery.img", + input_dir, "RECOVERY") + boot_img = common.GetBootableImage("boot.img", "boot.img", + input_dir, "BOOT") + + if not recovery_img or not boot_img: + sys.exit(0) + + def output_sink(fn, data): + with open(os.path.join(output_dir, "SYSTEM", *fn.split("/")), "wb") as f: + f.write(data) + + common.MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files deleted file mode 100755 index b71baf93ae..0000000000 --- a/tools/releasetools/ota_from_target_files +++ /dev/null @@ -1,1575 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (C) 2008 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Given a target-files zipfile, produces an OTA package that installs -that build. An incremental OTA is produced if -i is given, otherwise -a full OTA is produced. - -Usage: ota_from_target_files [flags] input_target_files output_ota_package - - --board_config - Deprecated. - - -k (--package_key) Key to use to sign the package (default is - the value of default_system_dev_certificate from the input - target-files's META/misc_info.txt, or - "build/target/product/security/testkey" if that value is not - specified). - - For incremental OTAs, the default value is based on the source - target-file, not the target build. - - -i (--incremental_from) - Generate an incremental OTA using the given target-files zip as - the starting build. - - -v (--verify) - Remount and verify the checksums of the files written to the - system and vendor (if used) partitions. Incremental builds only. - - -o (--oem_settings) - Use the file to specify the expected OEM-specific properties - on the OEM partition of the intended device. - - -w (--wipe_user_data) - Generate an OTA package that will wipe the user data partition - when installed. - - -n (--no_prereq) - Omit the timestamp prereq check normally included at the top of - the build scripts (used for developer OTA packages which - legitimately need to go back and forth). - - -e (--extra_script) - Insert the contents of file at the end of the update script. - - -a (--aslr_mode) - Specify whether to turn on ASLR for the package (on by default). 
- - -2 (--two_step) - Generate a 'two-step' OTA package, where recovery is updated - first, so that any changes made to the system partition are done - using the new recovery (new kernel, etc.). - - --block - Generate a block-based OTA if possible. Will fall back to a - file-based OTA if the target_files is older and doesn't support - block-based OTAs. - - -b (--binary) - Use the given binary as the update-binary in the output package, - instead of the binary in the build's target_files. Use for - development only. - - -t (--worker_threads) - Specifies the number of worker-threads that will be used when - generating patches for incremental updates (defaults to 3). - -""" - -import sys - -if sys.hexversion < 0x02070000: - print >> sys.stderr, "Python 2.7 or newer is required." - sys.exit(1) - -import copy -import errno -import multiprocessing -import os -import re -import subprocess -import tempfile -import time -import zipfile - -from hashlib import sha1 as sha1 - -import common -import edify_generator -import build_image -import blockimgdiff -import sparse_img - -OPTIONS = common.OPTIONS -OPTIONS.package_key = None -OPTIONS.incremental_source = None -OPTIONS.verify = False -OPTIONS.require_verbatim = set() -OPTIONS.prohibit_verbatim = set(("system/build.prop",)) -OPTIONS.patch_threshold = 0.95 -OPTIONS.wipe_user_data = False -OPTIONS.omit_prereq = False -OPTIONS.extra_script = None -OPTIONS.aslr_mode = True -OPTIONS.worker_threads = multiprocessing.cpu_count() // 2 -if OPTIONS.worker_threads == 0: - OPTIONS.worker_threads = 1 -OPTIONS.two_step = False -OPTIONS.no_signing = False -OPTIONS.block_based = False -OPTIONS.updater_binary = None -OPTIONS.oem_source = None -OPTIONS.fallback_to_full = True - -def MostPopularKey(d, default): - """Given a dict, return the key corresponding to the largest - value. 
Returns 'default' if the dict is empty.""" - x = [(v, k) for (k, v) in d.iteritems()] - if not x: return default - x.sort() - return x[-1][1] - - -def IsSymlink(info): - """Return true if the zipfile.ZipInfo object passed in represents a - symlink.""" - return (info.external_attr >> 16) == 0120777 - -def IsRegular(info): - """Return true if the zipfile.ZipInfo object passed in represents a - symlink.""" - return (info.external_attr >> 28) == 010 - -def ClosestFileMatch(src, tgtfiles, existing): - """Returns the closest file match between a source file and list - of potential matches. The exact filename match is preferred, - then the sha1 is searched for, and finally a file with the same - basename is evaluated. Rename support in the updater-binary is - required for the latter checks to be used.""" - - result = tgtfiles.get("path:" + src.name) - if result is not None: - return result - - if not OPTIONS.target_info_dict.get("update_rename_support", False): - return None - - if src.size < 1000: - return None - - result = tgtfiles.get("sha1:" + src.sha1) - if result is not None and existing.get(result.name) is None: - return result - result = tgtfiles.get("file:" + src.name.split("/")[-1]) - if result is not None and existing.get(result.name) is None: - return result - return None - -class ItemSet: - def __init__(self, partition, fs_config): - self.partition = partition - self.fs_config = fs_config - self.ITEMS = {} - - def Get(self, name, dir=False): - if name not in self.ITEMS: - self.ITEMS[name] = Item(self, name, dir=dir) - return self.ITEMS[name] - - def GetMetadata(self, input_zip): - # The target_files contains a record of what the uid, - # gid, and mode are supposed to be. - output = input_zip.read(self.fs_config) - - for line in output.split("\n"): - if not line: continue - columns = line.split() - name, uid, gid, mode = columns[:4] - selabel = None - capabilities = None - - # After the first 4 columns, there are a series of key=value - # pairs. 
Extract out the fields we care about. - for element in columns[4:]: - key, value = element.split("=") - if key == "selabel": - selabel = value - if key == "capabilities": - capabilities = value - - i = self.ITEMS.get(name, None) - if i is not None: - i.uid = int(uid) - i.gid = int(gid) - i.mode = int(mode, 8) - i.selabel = selabel - i.capabilities = capabilities - if i.dir: - i.children.sort(key=lambda i: i.name) - - # set metadata for the files generated by this script. - i = self.ITEMS.get("system/recovery-from-boot.p", None) - if i: i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0644, None, None - i = self.ITEMS.get("system/etc/install-recovery.sh", None) - if i: i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0544, None, None - - -class Item: - """Items represent the metadata (user, group, mode) of files and - directories in the system image.""" - def __init__(self, itemset, name, dir=False): - self.itemset = itemset - self.name = name - self.uid = None - self.gid = None - self.mode = None - self.selabel = None - self.capabilities = None - self.dir = dir - - if name: - self.parent = itemset.Get(os.path.dirname(name), dir=True) - self.parent.children.append(self) - else: - self.parent = None - if dir: - self.children = [] - - def Dump(self, indent=0): - if self.uid is not None: - print "%s%s %d %d %o" % (" "*indent, self.name, self.uid, self.gid, self.mode) - else: - print "%s%s %s %s %s" % (" "*indent, self.name, self.uid, self.gid, self.mode) - if self.dir: - print "%s%s" % (" "*indent, self.descendants) - print "%s%s" % (" "*indent, self.best_subtree) - for i in self.children: - i.Dump(indent=indent+1) - - def CountChildMetadata(self): - """Count up the (uid, gid, mode, selabel, capabilities) tuples for - all children and determine the best strategy for using set_perm_recursive and - set_perm to correctly chown/chmod all the files to their desired - values. Recursively calls itself for all descendants. 
- - Returns a dict of {(uid, gid, dmode, fmode, selabel, capabilities): count} counting up - all descendants of this node. (dmode or fmode may be None.) Also - sets the best_subtree of each directory Item to the (uid, gid, - dmode, fmode, selabel, capabilities) tuple that will match the most - descendants of that Item. - """ - - assert self.dir - d = self.descendants = {(self.uid, self.gid, self.mode, None, self.selabel, self.capabilities): 1} - for i in self.children: - if i.dir: - for k, v in i.CountChildMetadata().iteritems(): - d[k] = d.get(k, 0) + v - else: - k = (i.uid, i.gid, None, i.mode, i.selabel, i.capabilities) - d[k] = d.get(k, 0) + 1 - - # Find the (uid, gid, dmode, fmode, selabel, capabilities) - # tuple that matches the most descendants. - - # First, find the (uid, gid) pair that matches the most - # descendants. - ug = {} - for (uid, gid, _, _, _, _), count in d.iteritems(): - ug[(uid, gid)] = ug.get((uid, gid), 0) + count - ug = MostPopularKey(ug, (0, 0)) - - # Now find the dmode, fmode, selabel, and capabilities that match - # the most descendants with that (uid, gid), and choose those. 
- best_dmode = (0, 0755) - best_fmode = (0, 0644) - best_selabel = (0, None) - best_capabilities = (0, None) - for k, count in d.iteritems(): - if k[:2] != ug: continue - if k[2] is not None and count >= best_dmode[0]: best_dmode = (count, k[2]) - if k[3] is not None and count >= best_fmode[0]: best_fmode = (count, k[3]) - if k[4] is not None and count >= best_selabel[0]: best_selabel = (count, k[4]) - if k[5] is not None and count >= best_capabilities[0]: best_capabilities = (count, k[5]) - self.best_subtree = ug + (best_dmode[1], best_fmode[1], best_selabel[1], best_capabilities[1]) - - return d - - def SetPermissions(self, script): - """Append set_perm/set_perm_recursive commands to 'script' to - set all permissions, users, and groups for the tree of files - rooted at 'self'.""" - - self.CountChildMetadata() - - def recurse(item, current): - # current is the (uid, gid, dmode, fmode, selabel, capabilities) tuple that the current - # item (and all its children) have already been set to. We only - # need to issue set_perm/set_perm_recursive commands if we're - # supposed to be something different. 
- if item.dir: - if current != item.best_subtree: - script.SetPermissionsRecursive("/"+item.name, *item.best_subtree) - current = item.best_subtree - - if item.uid != current[0] or item.gid != current[1] or \ - item.mode != current[2] or item.selabel != current[4] or \ - item.capabilities != current[5]: - script.SetPermissions("/"+item.name, item.uid, item.gid, - item.mode, item.selabel, item.capabilities) - - for i in item.children: - recurse(i, current) - else: - if item.uid != current[0] or item.gid != current[1] or \ - item.mode != current[3] or item.selabel != current[4] or \ - item.capabilities != current[5]: - script.SetPermissions("/"+item.name, item.uid, item.gid, - item.mode, item.selabel, item.capabilities) - - recurse(self, (-1, -1, -1, -1, None, None)) - - -def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None): - """Copies files for the partition in the input zip to the output - zip. Populates the Item class with their metadata, and returns a - list of symlinks. output_zip may be None, in which case the copy is - skipped (but the other side effects still happen). substitute is an - optional dict of {output filename: contents} to be output instead of - certain input files. 
- """ - - symlinks = [] - - partition = itemset.partition - - for info in input_zip.infolist(): - prefix = partition.upper() + "/" - if info.filename.startswith(prefix): - basefilename = info.filename[len(prefix):] - if IsSymlink(info): - symlinks.append((input_zip.read(info.filename), - "/" + partition + "/" + basefilename)) - else: - info2 = copy.copy(info) - fn = info2.filename = partition + "/" + basefilename - if substitute and fn in substitute and substitute[fn] is None: - continue - if output_zip is not None: - if substitute and fn in substitute: - data = substitute[fn] - else: - data = input_zip.read(info.filename) - output_zip.writestr(info2, data) - if fn.endswith("/"): - itemset.Get(fn[:-1], dir=True) - else: - itemset.Get(fn, dir=False) - - symlinks.sort() - return symlinks - - -def SignOutput(temp_zip_name, output_zip_name): - key_passwords = common.GetKeyPasswords([OPTIONS.package_key]) - pw = key_passwords[OPTIONS.package_key] - - common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw, - whole_file=True) - - -def AppendAssertions(script, info_dict, oem_dict = None): - oem_props = info_dict.get("oem_fingerprint_properties") - if oem_props is None or len(oem_props) == 0: - device = GetBuildProp("ro.product.device", info_dict) - script.AssertDevice(device) - else: - if oem_dict is None: - raise common.ExternalError("No OEM file provided to answer expected assertions") - for prop in oem_props.split(): - if oem_dict.get(prop) is None: - raise common.ExternalError("The OEM file is missing the property %s" % prop) - script.AssertOemProperty(prop, oem_dict.get(prop)) - - -def HasRecoveryPatch(target_files_zip): - try: - target_files_zip.getinfo("SYSTEM/recovery-from-boot.p") - return True - except KeyError: - return False - -def HasVendorPartition(target_files_zip): - try: - target_files_zip.getinfo("VENDOR/") - return True - except KeyError: - return False - -def GetOemProperty(name, oem_props, oem_dict, info_dict): - if oem_props is not 
None and name in oem_props: - return oem_dict[name] - return GetBuildProp(name, info_dict) - - -def CalculateFingerprint(oem_props, oem_dict, info_dict): - if oem_props is None: - return GetBuildProp("ro.build.fingerprint", info_dict) - return "%s/%s/%s:%s" % ( - GetOemProperty("ro.product.brand", oem_props, oem_dict, info_dict), - GetOemProperty("ro.product.name", oem_props, oem_dict, info_dict), - GetOemProperty("ro.product.device", oem_props, oem_dict, info_dict), - GetBuildProp("ro.build.thumbprint", info_dict)) - - -def GetImage(which, tmpdir, info_dict): - # Return an image object (suitable for passing to BlockImageDiff) - # for the 'which' partition (most be "system" or "vendor"). If a - # prebuilt image and file map are found in tmpdir they are used, - # otherwise they are reconstructed from the individual files. - - assert which in ("system", "vendor") - - path = os.path.join(tmpdir, "IMAGES", which + ".img") - mappath = os.path.join(tmpdir, "IMAGES", which + ".map") - if os.path.exists(path) and os.path.exists(mappath): - print "using %s.img from target-files" % (which,) - # This is a 'new' target-files, which already has the image in it. - - else: - print "building %s.img from target-files" % (which,) - - # This is an 'old' target-files, which does not contain images - # already built. Build them. - - mappath = tempfile.mkstemp()[1] - OPTIONS.tempfiles.append(mappath) - - import add_img_to_target_files - if which == "system": - path = add_img_to_target_files.BuildSystem( - tmpdir, info_dict, block_list=mappath) - elif which == "vendor": - path = add_img_to_target_files.BuildVendor( - tmpdir, info_dict, block_list=mappath) - - return sparse_img.SparseImage(path, mappath) - - -def WriteFullOTAPackage(input_zip, output_zip): - # TODO: how to determine this? We don't know what version it will - # be installed on top of. For now, we expect the API just won't - # change very often. 
- script = edify_generator.EdifyGenerator(3, OPTIONS.info_dict) - - oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties") - recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options") - oem_dict = None - if oem_props is not None and len(oem_props) > 0: - if OPTIONS.oem_source is None: - raise common.ExternalError("OEM source required for this build") - script.Mount("/oem", recovery_mount_options) - oem_dict = common.LoadDictionaryFromLines(open(OPTIONS.oem_source).readlines()) - - metadata = {"post-build": CalculateFingerprint( - oem_props, oem_dict, OPTIONS.info_dict), - "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, - OPTIONS.info_dict), - "post-timestamp": GetBuildProp("ro.build.date.utc", - OPTIONS.info_dict), - } - - device_specific = common.DeviceSpecificParams( - input_zip=input_zip, - input_version=OPTIONS.info_dict["recovery_api_version"], - output_zip=output_zip, - script=script, - input_tmp=OPTIONS.input_tmp, - metadata=metadata, - info_dict=OPTIONS.info_dict) - - has_recovery_patch = HasRecoveryPatch(input_zip) - block_based = OPTIONS.block_based and has_recovery_patch - - if not OPTIONS.omit_prereq: - ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict) - ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict) - script.AssertOlderBuild(ts, ts_text) - - AppendAssertions(script, OPTIONS.info_dict, oem_dict) - device_specific.FullOTA_Assertions() - - # Two-step package strategy (in chronological order, which is *not* - # the order in which the generated script has things): - # - # if stage is not "2/3" or "3/3": - # write recovery image to boot partition - # set stage to "2/3" - # reboot to boot partition and restart recovery - # else if stage is "2/3": - # write recovery image to recovery partition - # set stage to "3/3" - # reboot to recovery partition and restart recovery - # else: - # (stage must be "3/3") - # set stage to "" - # do normal full package installation: - # wipe and install system, 
boot image, etc. - # set up system to update recovery partition on first boot - # complete script normally (allow recovery to mark itself finished and reboot) - - recovery_img = common.GetBootableImage("recovery.img", "recovery.img", - OPTIONS.input_tmp, "RECOVERY") - if OPTIONS.two_step: - if not OPTIONS.info_dict.get("multistage_support", None): - assert False, "two-step packages not supported by this build" - fs = OPTIONS.info_dict["fstab"]["/misc"] - assert fs.fs_type.upper() == "EMMC", \ - "two-step packages only supported on devices with EMMC /misc partitions" - bcb_dev = {"bcb_dev": fs.device} - common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data) - script.AppendExtra(""" -if get_stage("%(bcb_dev)s") == "2/3" then -""" % bcb_dev) - script.WriteRawImage("/recovery", "recovery.img") - script.AppendExtra(""" -set_stage("%(bcb_dev)s", "3/3"); -reboot_now("%(bcb_dev)s", "recovery"); -else if get_stage("%(bcb_dev)s") == "3/3" then -""" % bcb_dev) - - device_specific.FullOTA_InstallBegin() - - system_progress = 0.75 - - if OPTIONS.wipe_user_data: - system_progress -= 0.1 - if HasVendorPartition(input_zip): - system_progress -= 0.1 - - if "selinux_fc" in OPTIONS.info_dict: - WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip) - - recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options") - - system_items = ItemSet("system", "META/filesystem_config.txt") - script.ShowProgress(system_progress, 0) - - if block_based: - # Full OTA is done as an "incremental" against an empty source - # image. This has the effect of writing new data from the package - # to the entire partition, but lets us reuse the updater code that - # writes incrementals to do it. 
- system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict) - system_tgt.ResetFileMap() - system_diff = common.BlockDifference("system", system_tgt, src=None) - system_diff.WriteScript(script, output_zip) - else: - script.FormatPartition("/system") - script.Mount("/system", recovery_mount_options) - if not has_recovery_patch: - script.UnpackPackageDir("recovery", "/system") - script.UnpackPackageDir("system", "/system") - - symlinks = CopyPartitionFiles(system_items, input_zip, output_zip) - script.MakeSymlinks(symlinks) - - boot_img = common.GetBootableImage("boot.img", "boot.img", - OPTIONS.input_tmp, "BOOT") - - if not block_based: - def output_sink(fn, data): - common.ZipWriteStr(output_zip, "recovery/" + fn, data) - system_items.Get("system/" + fn, dir=False) - - common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, - recovery_img, boot_img) - - system_items.GetMetadata(input_zip) - system_items.Get("system").SetPermissions(script) - - if HasVendorPartition(input_zip): - vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt") - script.ShowProgress(0.1, 0) - - if block_based: - vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict) - vendor_tgt.ResetFileMap() - vendor_diff = common.BlockDifference("vendor", vendor_tgt) - vendor_diff.WriteScript(script, output_zip) - else: - script.FormatPartition("/vendor") - script.Mount("/vendor", recovery_mount_options) - script.UnpackPackageDir("vendor", "/vendor") - - symlinks = CopyPartitionFiles(vendor_items, input_zip, output_zip) - script.MakeSymlinks(symlinks) - - vendor_items.GetMetadata(input_zip) - vendor_items.Get("vendor").SetPermissions(script) - - common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict) - common.ZipWriteStr(output_zip, "boot.img", boot_img.data) - - script.ShowProgress(0.05, 5) - script.WriteRawImage("/boot", "boot.img") - - script.ShowProgress(0.2, 10) - device_specific.FullOTA_InstallEnd() - - if OPTIONS.extra_script is not None: - 
script.AppendExtra(OPTIONS.extra_script) - - script.UnmountAll() - - if OPTIONS.wipe_user_data: - script.ShowProgress(0.1, 10) - script.FormatPartition("/data") - - if OPTIONS.two_step: - script.AppendExtra(""" -set_stage("%(bcb_dev)s", ""); -""" % bcb_dev) - script.AppendExtra("else\n") - script.WriteRawImage("/boot", "recovery.img") - script.AppendExtra(""" -set_stage("%(bcb_dev)s", "2/3"); -reboot_now("%(bcb_dev)s", ""); -endif; -endif; -""" % bcb_dev) - script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary) - WriteMetadata(metadata, output_zip) - - -def WritePolicyConfig(file_name, output_zip): - common.ZipWrite(output_zip, file_name, os.path.basename(file_name)) - - -def WriteMetadata(metadata, output_zip): - common.ZipWriteStr(output_zip, "META-INF/com/android/metadata", - "".join(["%s=%s\n" % kv - for kv in sorted(metadata.iteritems())])) - - -def LoadPartitionFiles(z, partition): - """Load all the files from the given partition in a given target-files - ZipFile, and return a dict of {filename: File object}.""" - out = {} - prefix = partition.upper() + "/" - for info in z.infolist(): - if info.filename.startswith(prefix) and not IsSymlink(info): - basefilename = info.filename[len(prefix):] - fn = partition + "/" + basefilename - data = z.read(info.filename) - out[fn] = common.File(fn, data) - return out - - -def GetBuildProp(prop, info_dict): - """Return the fingerprint of the build of a given target-files info_dict.""" - try: - return info_dict.get("build.prop", {})[prop] - except KeyError: - raise common.ExternalError("couldn't find %s in build.prop" % (prop,)) - - -def AddToKnownPaths(filename, known_paths): - if filename[-1] == "/": - return - dirs = filename.split("/")[:-1] - while len(dirs) > 0: - path = "/".join(dirs) - if path in known_paths: - break; - known_paths.add(path) - dirs.pop() - - -def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip): - source_version = OPTIONS.source_info_dict["recovery_api_version"] 
- target_version = OPTIONS.target_info_dict["recovery_api_version"] - - if source_version == 0: - print ("WARNING: generating edify script for a source that " - "can't install it.") - script = edify_generator.EdifyGenerator(source_version, - OPTIONS.target_info_dict) - - metadata = {"pre-device": GetBuildProp("ro.product.device", - OPTIONS.source_info_dict), - "post-timestamp": GetBuildProp("ro.build.date.utc", - OPTIONS.target_info_dict), - } - - device_specific = common.DeviceSpecificParams( - source_zip=source_zip, - source_version=source_version, - target_zip=target_zip, - target_version=target_version, - output_zip=output_zip, - script=script, - metadata=metadata, - info_dict=OPTIONS.info_dict) - - source_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.source_info_dict) - target_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.target_info_dict) - metadata["pre-build"] = source_fp - metadata["post-build"] = target_fp - - source_boot = common.GetBootableImage( - "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", - OPTIONS.source_info_dict) - target_boot = common.GetBootableImage( - "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT") - updating_boot = (not OPTIONS.two_step and - (source_boot.data != target_boot.data)) - - source_recovery = common.GetBootableImage( - "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY", - OPTIONS.source_info_dict) - target_recovery = common.GetBootableImage( - "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY") - updating_recovery = (source_recovery.data != target_recovery.data) - - system_src = GetImage("system", OPTIONS.source_tmp, OPTIONS.source_info_dict) - system_tgt = GetImage("system", OPTIONS.target_tmp, OPTIONS.target_info_dict) - - blockimgdiff_version = 1 - if OPTIONS.info_dict: - blockimgdiff_version = max( - int(i) for i in - OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(",")) - - system_diff = common.BlockDifference("system", system_tgt, system_src, - 
check_first_block=True, - version=blockimgdiff_version) - - if HasVendorPartition(target_zip): - if not HasVendorPartition(source_zip): - raise RuntimeError("can't generate incremental that adds /vendor") - vendor_src = GetImage("vendor", OPTIONS.source_tmp, OPTIONS.source_info_dict) - vendor_tgt = GetImage("vendor", OPTIONS.target_tmp, OPTIONS.target_info_dict) - vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src, - check_first_block=True, - version=blockimgdiff_version) - else: - vendor_diff = None - - oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties") - recovery_mount_options = OPTIONS.target_info_dict.get("recovery_mount_options") - oem_dict = None - if oem_props is not None and len(oem_props) > 0: - if OPTIONS.oem_source is None: - raise common.ExternalError("OEM source required for this build") - script.Mount("/oem", recovery_mount_options) - oem_dict = common.LoadDictionaryFromLines(open(OPTIONS.oem_source).readlines()) - - AppendAssertions(script, OPTIONS.target_info_dict, oem_dict) - device_specific.IncrementalOTA_Assertions() - - # Two-step incremental package strategy (in chronological order, - # which is *not* the order in which the generated script has - # things): - # - # if stage is not "2/3" or "3/3": - # do verification on current system - # write recovery image to boot partition - # set stage to "2/3" - # reboot to boot partition and restart recovery - # else if stage is "2/3": - # write recovery image to recovery partition - # set stage to "3/3" - # reboot to recovery partition and restart recovery - # else: - # (stage must be "3/3") - # perform update: - # patch system files, etc. 
- # force full install of new boot image - # set up system to update recovery partition on first boot - # complete script normally (allow recovery to mark itself finished and reboot) - - if OPTIONS.two_step: - if not OPTIONS.info_dict.get("multistage_support", None): - assert False, "two-step packages not supported by this build" - fs = OPTIONS.info_dict["fstab"]["/misc"] - assert fs.fs_type.upper() == "EMMC", \ - "two-step packages only supported on devices with EMMC /misc partitions" - bcb_dev = {"bcb_dev": fs.device} - common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data) - script.AppendExtra(""" -if get_stage("%(bcb_dev)s") == "2/3" then -""" % bcb_dev) - script.AppendExtra("sleep(20);\n"); - script.WriteRawImage("/recovery", "recovery.img") - script.AppendExtra(""" -set_stage("%(bcb_dev)s", "3/3"); -reboot_now("%(bcb_dev)s", "recovery"); -else if get_stage("%(bcb_dev)s") != "3/3" then -""" % bcb_dev) - - script.Print("Verifying current system...") - - device_specific.IncrementalOTA_VerifyBegin() - - if oem_props is None: - # When blockimgdiff version is less than 3 (non-resumable block-based OTA), - # patching on a device that's already on the target build will damage the - # system. Because operations like move don't check the block state, they - # always apply the changes unconditionally. 
- if blockimgdiff_version <= 2: - script.AssertSomeFingerprint(source_fp) - else: - script.AssertSomeFingerprint(source_fp, target_fp) - else: - if blockimgdiff_version <= 2: - script.AssertSomeThumbprint( - GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) - else: - script.AssertSomeThumbprint( - GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict), - GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) - - if updating_boot: - boot_type, boot_device = common.GetTypeAndDevice("/boot", OPTIONS.info_dict) - d = common.Difference(target_boot, source_boot) - _, _, d = d.ComputePatch() - if d is None: - include_full_boot = True - common.ZipWriteStr(output_zip, "boot.img", target_boot.data) - else: - include_full_boot = False - - print "boot target: %d source: %d diff: %d" % ( - target_boot.size, source_boot.size, len(d)) - - common.ZipWriteStr(output_zip, "patch/boot.img.p", d) - - script.PatchCheck("%s:%s:%d:%s:%d:%s" % - (boot_type, boot_device, - source_boot.size, source_boot.sha1, - target_boot.size, target_boot.sha1)) - - device_specific.IncrementalOTA_VerifyEnd() - - if OPTIONS.two_step: - script.WriteRawImage("/boot", "recovery.img") - script.AppendExtra(""" -set_stage("%(bcb_dev)s", "2/3"); -reboot_now("%(bcb_dev)s", ""); -else -""" % bcb_dev) - - # Verify the existing partitions. 
- system_diff.WriteVerifyScript(script) - if vendor_diff: - vendor_diff.WriteVerifyScript(script) - - script.Comment("---- start making changes here ----") - - device_specific.IncrementalOTA_InstallBegin() - - system_diff.WriteScript(script, output_zip, - progress=0.8 if vendor_diff else 0.9) - if vendor_diff: - vendor_diff.WriteScript(script, output_zip, progress=0.1) - - if OPTIONS.two_step: - common.ZipWriteStr(output_zip, "boot.img", target_boot.data) - script.WriteRawImage("/boot", "boot.img") - print "writing full boot image (forced by two-step mode)" - - if not OPTIONS.two_step: - if updating_boot: - if include_full_boot: - print "boot image changed; including full." - script.Print("Installing boot image...") - script.WriteRawImage("/boot", "boot.img") - else: - # Produce the boot image by applying a patch to the current - # contents of the boot partition, and write it back to the - # partition. - print "boot image changed; including patch." - script.Print("Patching boot image...") - script.ShowProgress(0.1, 10) - script.ApplyPatch("%s:%s:%d:%s:%d:%s" - % (boot_type, boot_device, - source_boot.size, source_boot.sha1, - target_boot.size, target_boot.sha1), - "-", - target_boot.size, target_boot.sha1, - source_boot.sha1, "patch/boot.img.p") - else: - print "boot image unchanged; skipping." - - # Do device-specific installation (eg, write radio image). - device_specific.IncrementalOTA_InstallEnd() - - if OPTIONS.extra_script is not None: - script.AppendExtra(OPTIONS.extra_script) - - if OPTIONS.wipe_user_data: - script.Print("Erasing user data...") - script.FormatPartition("/data") - - if OPTIONS.two_step: - script.AppendExtra(""" -set_stage("%(bcb_dev)s", ""); -endif; -endif; -""" % bcb_dev) - - script.SetProgress(1) - script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary) - WriteMetadata(metadata, output_zip) - - -class FileDifference: - def __init__(self, partition, source_zip, target_zip, output_zip): - print "Loading target..." 
- self.target_data = target_data = LoadPartitionFiles(target_zip, partition) - print "Loading source..." - self.source_data = source_data = LoadPartitionFiles(source_zip, partition) - - self.verbatim_targets = verbatim_targets = [] - self.patch_list = patch_list = [] - diffs = [] - self.renames = renames = {} - known_paths = set() - largest_source_size = 0 - - matching_file_cache = {} - for fn, sf in source_data.items(): - assert fn == sf.name - matching_file_cache["path:" + fn] = sf - if fn in target_data.keys(): - AddToKnownPaths(fn, known_paths) - # Only allow eligibility for filename/sha matching - # if there isn't a perfect path match. - if target_data.get(sf.name) is None: - matching_file_cache["file:" + fn.split("/")[-1]] = sf - matching_file_cache["sha:" + sf.sha1] = sf - - for fn in sorted(target_data.keys()): - tf = target_data[fn] - assert fn == tf.name - sf = ClosestFileMatch(tf, matching_file_cache, renames) - if sf is not None and sf.name != tf.name: - print "File has moved from " + sf.name + " to " + tf.name - renames[sf.name] = tf - - if sf is None or fn in OPTIONS.require_verbatim: - # This file should be included verbatim - if fn in OPTIONS.prohibit_verbatim: - raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,)) - print "send", fn, "verbatim" - tf.AddToZip(output_zip) - verbatim_targets.append((fn, tf.size, tf.sha1)) - if fn in target_data.keys(): - AddToKnownPaths(fn, known_paths) - elif tf.sha1 != sf.sha1: - # File is different; consider sending as a patch - diffs.append(common.Difference(tf, sf)) - else: - # Target file data identical to source (may still be renamed) - pass - - common.ComputeDifferences(diffs) - - for diff in diffs: - tf, sf, d = diff.GetPatch() - path = "/".join(tf.name.split("/")[:-1]) - if d is None or len(d) > tf.size * OPTIONS.patch_threshold or \ - path not in known_paths: - # patch is almost as big as the file; don't bother patching - # or a patch + rename cannot take place due to the target - # directory 
not existing - tf.AddToZip(output_zip) - verbatim_targets.append((tf.name, tf.size, tf.sha1)) - if sf.name in renames: - del renames[sf.name] - AddToKnownPaths(tf.name, known_paths) - else: - common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d) - patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest())) - largest_source_size = max(largest_source_size, sf.size) - - self.largest_source_size = largest_source_size - - def EmitVerification(self, script): - so_far = 0 - for tf, sf, size, patch_sha in self.patch_list: - if tf.name != sf.name: - script.SkipNextActionIfTargetExists(tf.name, tf.sha1) - script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1) - so_far += sf.size - return so_far - - def EmitExplicitTargetVerification(self, script): - for fn, size, sha1 in self.verbatim_targets: - if (fn[-1] != "/"): - script.FileCheck("/"+fn, sha1) - for tf, _, _, _ in self.patch_list: - script.FileCheck(tf.name, tf.sha1) - - def RemoveUnneededFiles(self, script, extras=()): - script.DeleteFiles(["/"+i[0] for i in self.verbatim_targets] + - ["/"+i for i in sorted(self.source_data) - if i not in self.target_data and - i not in self.renames] + - list(extras)) - - def TotalPatchSize(self): - return sum(i[1].size for i in self.patch_list) - - def EmitPatches(self, script, total_patch_size, so_far): - self.deferred_patch_list = deferred_patch_list = [] - for item in self.patch_list: - tf, sf, size, _ = item - if tf.name == "system/build.prop": - deferred_patch_list.append(item) - continue - if (sf.name != tf.name): - script.SkipNextActionIfTargetExists(tf.name, tf.sha1) - script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p") - so_far += tf.size - script.SetProgress(so_far / total_patch_size) - return so_far - - def EmitDeferredPatches(self, script): - for item in self.deferred_patch_list: - tf, sf, size, _ = item - script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p") - 
script.SetPermissions("/system/build.prop", 0, 0, 0644, None, None) - - def EmitRenames(self, script): - if len(self.renames) > 0: - script.Print("Renaming files...") - for src, tgt in self.renames.iteritems(): - print "Renaming " + src + " to " + tgt.name - script.RenameFile(src, tgt.name) - - - - -def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip): - target_has_recovery_patch = HasRecoveryPatch(target_zip) - source_has_recovery_patch = HasRecoveryPatch(source_zip) - - if (OPTIONS.block_based and - target_has_recovery_patch and - source_has_recovery_patch): - return WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip) - - source_version = OPTIONS.source_info_dict["recovery_api_version"] - target_version = OPTIONS.target_info_dict["recovery_api_version"] - - if source_version == 0: - print ("WARNING: generating edify script for a source that " - "can't install it.") - script = edify_generator.EdifyGenerator(source_version, - OPTIONS.target_info_dict) - - oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties") - recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options") - oem_dict = None - if oem_props is not None and len(oem_props) > 0: - if OPTIONS.oem_source is None: - raise common.ExternalError("OEM source required for this build") - script.Mount("/oem", recovery_mount_options) - oem_dict = common.LoadDictionaryFromLines(open(OPTIONS.oem_source).readlines()) - - metadata = {"pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, - OPTIONS.source_info_dict), - "post-timestamp": GetBuildProp("ro.build.date.utc", - OPTIONS.target_info_dict), - } - - device_specific = common.DeviceSpecificParams( - source_zip=source_zip, - source_version=source_version, - target_zip=target_zip, - target_version=target_version, - output_zip=output_zip, - script=script, - metadata=metadata, - info_dict=OPTIONS.info_dict) - - system_diff = FileDifference("system", source_zip, target_zip, output_zip) - 
script.Mount("/system", recovery_mount_options) - if HasVendorPartition(target_zip): - vendor_diff = FileDifference("vendor", source_zip, target_zip, output_zip) - script.Mount("/vendor", recovery_mount_options) - else: - vendor_diff = None - - target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.target_info_dict) - source_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.source_info_dict) - - if oem_props is None: - script.AssertSomeFingerprint(source_fp, target_fp) - else: - script.AssertSomeThumbprint( - GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict), - GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) - - metadata["pre-build"] = source_fp - metadata["post-build"] = target_fp - - source_boot = common.GetBootableImage( - "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", - OPTIONS.source_info_dict) - target_boot = common.GetBootableImage( - "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT") - updating_boot = (not OPTIONS.two_step and - (source_boot.data != target_boot.data)) - - source_recovery = common.GetBootableImage( - "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY", - OPTIONS.source_info_dict) - target_recovery = common.GetBootableImage( - "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY") - updating_recovery = (source_recovery.data != target_recovery.data) - - # Here's how we divide up the progress bar: - # 0.1 for verifying the start state (PatchCheck calls) - # 0.8 for applying patches (ApplyPatch calls) - # 0.1 for unpacking verbatim files, symlinking, and doing the - # device-specific commands. 
- - AppendAssertions(script, OPTIONS.target_info_dict, oem_dict) - device_specific.IncrementalOTA_Assertions() - - # Two-step incremental package strategy (in chronological order, - # which is *not* the order in which the generated script has - # things): - # - # if stage is not "2/3" or "3/3": - # do verification on current system - # write recovery image to boot partition - # set stage to "2/3" - # reboot to boot partition and restart recovery - # else if stage is "2/3": - # write recovery image to recovery partition - # set stage to "3/3" - # reboot to recovery partition and restart recovery - # else: - # (stage must be "3/3") - # perform update: - # patch system files, etc. - # force full install of new boot image - # set up system to update recovery partition on first boot - # complete script normally (allow recovery to mark itself finished and reboot) - - if OPTIONS.two_step: - if not OPTIONS.info_dict.get("multistage_support", None): - assert False, "two-step packages not supported by this build" - fs = OPTIONS.info_dict["fstab"]["/misc"] - assert fs.fs_type.upper() == "EMMC", \ - "two-step packages only supported on devices with EMMC /misc partitions" - bcb_dev = {"bcb_dev": fs.device} - common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data) - script.AppendExtra(""" -if get_stage("%(bcb_dev)s") == "2/3" then -""" % bcb_dev) - script.AppendExtra("sleep(20);\n"); - script.WriteRawImage("/recovery", "recovery.img") - script.AppendExtra(""" -set_stage("%(bcb_dev)s", "3/3"); -reboot_now("%(bcb_dev)s", "recovery"); -else if get_stage("%(bcb_dev)s") != "3/3" then -""" % bcb_dev) - - script.Print("Verifying current system...") - - device_specific.IncrementalOTA_VerifyBegin() - - script.ShowProgress(0.1, 0) - so_far = system_diff.EmitVerification(script) - if vendor_diff: - so_far += vendor_diff.EmitVerification(script) - - if updating_boot: - d = common.Difference(target_boot, source_boot) - _, _, d = d.ComputePatch() - print "boot target: %d source: 
%d diff: %d" % ( - target_boot.size, source_boot.size, len(d)) - - common.ZipWriteStr(output_zip, "patch/boot.img.p", d) - - boot_type, boot_device = common.GetTypeAndDevice("/boot", OPTIONS.info_dict) - - script.PatchCheck("%s:%s:%d:%s:%d:%s" % - (boot_type, boot_device, - source_boot.size, source_boot.sha1, - target_boot.size, target_boot.sha1)) - so_far += source_boot.size - - size = [] - if system_diff.patch_list: size.append(system_diff.largest_source_size) - if vendor_diff: - if vendor_diff.patch_list: size.append(vendor_diff.largest_source_size) - if size or updating_recovery or updating_boot: - script.CacheFreeSpaceCheck(max(size)) - - device_specific.IncrementalOTA_VerifyEnd() - - if OPTIONS.two_step: - script.WriteRawImage("/boot", "recovery.img") - script.AppendExtra(""" -set_stage("%(bcb_dev)s", "2/3"); -reboot_now("%(bcb_dev)s", ""); -else -""" % bcb_dev) - - script.Comment("---- start making changes here ----") - - device_specific.IncrementalOTA_InstallBegin() - - if OPTIONS.two_step: - common.ZipWriteStr(output_zip, "boot.img", target_boot.data) - script.WriteRawImage("/boot", "boot.img") - print "writing full boot image (forced by two-step mode)" - - script.Print("Removing unneeded files...") - system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",)) - if vendor_diff: - vendor_diff.RemoveUnneededFiles(script) - - script.ShowProgress(0.8, 0) - total_patch_size = 1.0 + system_diff.TotalPatchSize() - if vendor_diff: - total_patch_size += vendor_diff.TotalPatchSize() - if updating_boot: - total_patch_size += target_boot.size - - script.Print("Patching system files...") - so_far = system_diff.EmitPatches(script, total_patch_size, 0) - if vendor_diff: - script.Print("Patching vendor files...") - so_far = vendor_diff.EmitPatches(script, total_patch_size, so_far) - - if not OPTIONS.two_step: - if updating_boot: - # Produce the boot image by applying a patch to the current - # contents of the boot partition, and write it back to the - # partition. 
- script.Print("Patching boot image...") - script.ApplyPatch("%s:%s:%d:%s:%d:%s" - % (boot_type, boot_device, - source_boot.size, source_boot.sha1, - target_boot.size, target_boot.sha1), - "-", - target_boot.size, target_boot.sha1, - source_boot.sha1, "patch/boot.img.p") - so_far += target_boot.size - script.SetProgress(so_far / total_patch_size) - print "boot image changed; including." - else: - print "boot image unchanged; skipping." - - system_items = ItemSet("system", "META/filesystem_config.txt") - if vendor_diff: - vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt") - - if updating_recovery: - # Recovery is generated as a patch using both the boot image - # (which contains the same linux kernel as recovery) and the file - # /system/etc/recovery-resource.dat (which contains all the images - # used in the recovery UI) as sources. This lets us minimize the - # size of the patch, which must be included in every OTA package. - # - # For older builds where recovery-resource.dat is not present, we - # use only the boot image as the source. - - if not target_has_recovery_patch: - def output_sink(fn, data): - common.ZipWriteStr(output_zip, "recovery/" + fn, data) - system_items.Get("system/" + fn, dir=False) - - common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink, - target_recovery, target_boot) - script.DeleteFiles(["/system/recovery-from-boot.p", - "/system/etc/install-recovery.sh"]) - print "recovery image changed; including as patch from boot." - else: - print "recovery image unchanged; skipping." 
- - script.ShowProgress(0.1, 10) - - target_symlinks = CopyPartitionFiles(system_items, target_zip, None) - if vendor_diff: - target_symlinks.extend(CopyPartitionFiles(vendor_items, target_zip, None)) - - temp_script = script.MakeTemporary() - system_items.GetMetadata(target_zip) - system_items.Get("system").SetPermissions(temp_script) - if vendor_diff: - vendor_items.GetMetadata(target_zip) - vendor_items.Get("vendor").SetPermissions(temp_script) - - # Note that this call will mess up the trees of Items, so make sure - # we're done with them. - source_symlinks = CopyPartitionFiles(system_items, source_zip, None) - if vendor_diff: - source_symlinks.extend(CopyPartitionFiles(vendor_items, source_zip, None)) - - target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks]) - source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks]) - - # Delete all the symlinks in source that aren't in target. This - # needs to happen before verbatim files are unpacked, in case a - # symlink in the source is replaced by a real file in the target. - to_delete = [] - for dest, link in source_symlinks: - if link not in target_symlinks_d: - to_delete.append(link) - script.DeleteFiles(to_delete) - - if system_diff.verbatim_targets: - script.Print("Unpacking new system files...") - script.UnpackPackageDir("system", "/system") - if vendor_diff and vendor_diff.verbatim_targets: - script.Print("Unpacking new vendor files...") - script.UnpackPackageDir("vendor", "/vendor") - - if updating_recovery and not target_has_recovery_patch: - script.Print("Unpacking new recovery...") - script.UnpackPackageDir("recovery", "/system") - - system_diff.EmitRenames(script) - if vendor_diff: - vendor_diff.EmitRenames(script) - - script.Print("Symlinks and permissions...") - - # Create all the symlinks that don't already exist, or point to - # somewhere different than what we want. Delete each symlink before - # creating it, since the 'symlink' command won't overwrite. 
- to_create = [] - for dest, link in target_symlinks: - if link in source_symlinks_d: - if dest != source_symlinks_d[link]: - to_create.append((dest, link)) - else: - to_create.append((dest, link)) - script.DeleteFiles([i[1] for i in to_create]) - script.MakeSymlinks(to_create) - - # Now that the symlinks are created, we can set all the - # permissions. - script.AppendScript(temp_script) - - # Do device-specific installation (eg, write radio image). - device_specific.IncrementalOTA_InstallEnd() - - if OPTIONS.extra_script is not None: - script.AppendExtra(OPTIONS.extra_script) - - # Patch the build.prop file last, so if something fails but the - # device can still come up, it appears to be the old build and will - # get set the OTA package again to retry. - script.Print("Patching remaining system files...") - system_diff.EmitDeferredPatches(script) - - if OPTIONS.wipe_user_data: - script.Print("Erasing user data...") - script.FormatPartition("/data") - - if OPTIONS.two_step: - script.AppendExtra(""" -set_stage("%(bcb_dev)s", ""); -endif; -endif; -""" % bcb_dev) - - if OPTIONS.verify and system_diff: - script.Print("Remounting and verifying system partition files...") - script.Unmount("/system") - script.Mount("/system") - system_diff.EmitExplicitTargetVerification(script) - - if OPTIONS.verify and vendor_diff: - script.Print("Remounting and verifying vendor partition files...") - script.Unmount("/vendor") - script.Mount("/vendor") - vendor_diff.EmitExplicitTargetVerification(script) - script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary) - - WriteMetadata(metadata, output_zip) - - -def main(argv): - - def option_handler(o, a): - if o == "--board_config": - pass # deprecated - elif o in ("-k", "--package_key"): - OPTIONS.package_key = a - elif o in ("-i", "--incremental_from"): - OPTIONS.incremental_source = a - elif o in ("-w", "--wipe_user_data"): - OPTIONS.wipe_user_data = True - elif o in ("-n", "--no_prereq"): - OPTIONS.omit_prereq = True - 
elif o in ("-o", "--oem_settings"): - OPTIONS.oem_source = a - elif o in ("-e", "--extra_script"): - OPTIONS.extra_script = a - elif o in ("-a", "--aslr_mode"): - if a in ("on", "On", "true", "True", "yes", "Yes"): - OPTIONS.aslr_mode = True - else: - OPTIONS.aslr_mode = False - elif o in ("-t", "--worker_threads"): - if a.isdigit(): - OPTIONS.worker_threads = int(a) - else: - raise ValueError("Cannot parse value %r for option %r - only " - "integers are allowed." % (a, o)) - elif o in ("-2", "--two_step"): - OPTIONS.two_step = True - elif o == "--no_signing": - OPTIONS.no_signing = True - elif o in ("--verify"): - OPTIONS.verify = True - elif o == "--block": - OPTIONS.block_based = True - elif o in ("-b", "--binary"): - OPTIONS.updater_binary = a - elif o in ("--no_fallback_to_full",): - OPTIONS.fallback_to_full = False - else: - return False - return True - - args = common.ParseOptions(argv, __doc__, - extra_opts="b:k:i:d:wne:t:a:2o:", - extra_long_opts=["board_config=", - "package_key=", - "incremental_from=", - "wipe_user_data", - "no_prereq", - "extra_script=", - "worker_threads=", - "aslr_mode=", - "two_step", - "no_signing", - "block", - "binary=", - "oem_settings=", - "verify", - "no_fallback_to_full", - ], - extra_option_handler=option_handler) - - if len(args) != 2: - common.Usage(__doc__) - sys.exit(1) - - if OPTIONS.extra_script is not None: - OPTIONS.extra_script = open(OPTIONS.extra_script).read() - - print "unzipping target target-files..." - OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0]) - - OPTIONS.target_tmp = OPTIONS.input_tmp - OPTIONS.info_dict = common.LoadInfoDict(input_zip) - - # If this image was originally labelled with SELinux contexts, make sure we - # also apply the labels in our new image. During building, the "file_contexts" - # is in the out/ directory tree, but for repacking from target-files.zip it's - # in the root directory of the ramdisk. 
- if "selinux_fc" in OPTIONS.info_dict: - OPTIONS.info_dict["selinux_fc"] = os.path.join(OPTIONS.input_tmp, "BOOT", "RAMDISK", - "file_contexts") - - if OPTIONS.verbose: - print "--- target info ---" - common.DumpInfoDict(OPTIONS.info_dict) - - # If the caller explicitly specified the device-specific extensions - # path via -s/--device_specific, use that. Otherwise, use - # META/releasetools.py if it is present in the target target_files. - # Otherwise, take the path of the file from 'tool_extensions' in the - # info dict and look for that in the local filesystem, relative to - # the current directory. - - if OPTIONS.device_specific is None: - from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py") - if os.path.exists(from_input): - print "(using device-specific extensions from target_files)" - OPTIONS.device_specific = from_input - else: - OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions", None) - - if OPTIONS.device_specific is not None: - OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific) - - while True: - - if OPTIONS.no_signing: - if os.path.exists(args[1]): os.unlink(args[1]) - output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED) - else: - temp_zip_file = tempfile.NamedTemporaryFile() - output_zip = zipfile.ZipFile(temp_zip_file, "w", - compression=zipfile.ZIP_DEFLATED) - - if OPTIONS.incremental_source is None: - WriteFullOTAPackage(input_zip, output_zip) - if OPTIONS.package_key is None: - OPTIONS.package_key = OPTIONS.info_dict.get( - "default_system_dev_certificate", - "build/target/product/security/testkey") - break - - else: - print "unzipping source target-files..." 
- OPTIONS.source_tmp, source_zip = common.UnzipTemp(OPTIONS.incremental_source) - OPTIONS.target_info_dict = OPTIONS.info_dict - OPTIONS.source_info_dict = common.LoadInfoDict(source_zip) - if "selinux_fc" in OPTIONS.source_info_dict: - OPTIONS.source_info_dict["selinux_fc"] = os.path.join(OPTIONS.source_tmp, "BOOT", "RAMDISK", - "file_contexts") - if OPTIONS.package_key is None: - OPTIONS.package_key = OPTIONS.source_info_dict.get( - "default_system_dev_certificate", - "build/target/product/security/testkey") - if OPTIONS.verbose: - print "--- source info ---" - common.DumpInfoDict(OPTIONS.source_info_dict) - try: - WriteIncrementalOTAPackage(input_zip, source_zip, output_zip) - break - except ValueError: - if not OPTIONS.fallback_to_full: raise - print "--- failed to build incremental; falling back to full ---" - OPTIONS.incremental_source = None - output_zip.close() - - output_zip.close() - - if not OPTIONS.no_signing: - SignOutput(temp_zip_file.name, args[1]) - temp_zip_file.close() - - print "done." - - -if __name__ == '__main__': - try: - common.CloseInheritedPipes() - main(sys.argv[1:]) - except common.ExternalError, e: - print - print " ERROR: %s" % (e,) - print - sys.exit(1) - finally: - common.Cleanup() diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files new file mode 120000 index 0000000000..6755a902f0 --- /dev/null +++ b/tools/releasetools/ota_from_target_files @@ -0,0 +1 @@ +ota_from_target_files.py \ No newline at end of file diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py new file mode 100755 index 0000000000..26fbaf0fc8 --- /dev/null +++ b/tools/releasetools/ota_from_target_files.py @@ -0,0 +1,1602 @@ +#!/usr/bin/env python +# +# Copyright (C) 2008 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Given a target-files zipfile, produces an OTA package that installs +that build. An incremental OTA is produced if -i is given, otherwise +a full OTA is produced. + +Usage: ota_from_target_files [flags] input_target_files output_ota_package + + --board_config + Deprecated. + + -k (--package_key) Key to use to sign the package (default is + the value of default_system_dev_certificate from the input + target-files's META/misc_info.txt, or + "build/target/product/security/testkey" if that value is not + specified). + + For incremental OTAs, the default value is based on the source + target-file, not the target build. + + -i (--incremental_from) + Generate an incremental OTA using the given target-files zip as + the starting build. + + -v (--verify) + Remount and verify the checksums of the files written to the + system and vendor (if used) partitions. Incremental builds only. + + -o (--oem_settings) + Use the file to specify the expected OEM-specific properties + on the OEM partition of the intended device. + + -w (--wipe_user_data) + Generate an OTA package that will wipe the user data partition + when installed. + + -n (--no_prereq) + Omit the timestamp prereq check normally included at the top of + the build scripts (used for developer OTA packages which + legitimately need to go back and forth). + + -e (--extra_script) + Insert the contents of file at the end of the update script. + + -a (--aslr_mode) + Specify whether to turn on ASLR for the package (on by default). 
+ + -2 (--two_step) + Generate a 'two-step' OTA package, where recovery is updated + first, so that any changes made to the system partition are done + using the new recovery (new kernel, etc.). + + --block + Generate a block-based OTA if possible. Will fall back to a + file-based OTA if the target_files is older and doesn't support + block-based OTAs. + + -b (--binary) + Use the given binary as the update-binary in the output package, + instead of the binary in the build's target_files. Use for + development only. + + -t (--worker_threads) + Specifies the number of worker-threads that will be used when + generating patches for incremental updates (defaults to 3). + +""" + +import sys + +if sys.hexversion < 0x02070000: + print >> sys.stderr, "Python 2.7 or newer is required." + sys.exit(1) + +import copy +import multiprocessing +import os +import tempfile +import zipfile + +import common +import edify_generator +import sparse_img + +OPTIONS = common.OPTIONS +OPTIONS.package_key = None +OPTIONS.incremental_source = None +OPTIONS.verify = False +OPTIONS.require_verbatim = set() +OPTIONS.prohibit_verbatim = set(("system/build.prop",)) +OPTIONS.patch_threshold = 0.95 +OPTIONS.wipe_user_data = False +OPTIONS.omit_prereq = False +OPTIONS.extra_script = None +OPTIONS.aslr_mode = True +OPTIONS.worker_threads = multiprocessing.cpu_count() // 2 +if OPTIONS.worker_threads == 0: + OPTIONS.worker_threads = 1 +OPTIONS.two_step = False +OPTIONS.no_signing = False +OPTIONS.block_based = False +OPTIONS.updater_binary = None +OPTIONS.oem_source = None +OPTIONS.fallback_to_full = True + +def MostPopularKey(d, default): + """Given a dict, return the key corresponding to the largest + value. 
Returns 'default' if the dict is empty.""" + x = [(v, k) for (k, v) in d.iteritems()] + if not x: + return default + x.sort() + return x[-1][1] + + +def IsSymlink(info): + """Return true if the zipfile.ZipInfo object passed in represents a + symlink.""" + return (info.external_attr >> 16) == 0o120777 + +def IsRegular(info): + """Return true if the zipfile.ZipInfo object passed in represents a + symlink.""" + return (info.external_attr >> 28) == 0o10 + +def ClosestFileMatch(src, tgtfiles, existing): + """Returns the closest file match between a source file and list + of potential matches. The exact filename match is preferred, + then the sha1 is searched for, and finally a file with the same + basename is evaluated. Rename support in the updater-binary is + required for the latter checks to be used.""" + + result = tgtfiles.get("path:" + src.name) + if result is not None: + return result + + if not OPTIONS.target_info_dict.get("update_rename_support", False): + return None + + if src.size < 1000: + return None + + result = tgtfiles.get("sha1:" + src.sha1) + if result is not None and existing.get(result.name) is None: + return result + result = tgtfiles.get("file:" + src.name.split("/")[-1]) + if result is not None and existing.get(result.name) is None: + return result + return None + +class ItemSet(object): + def __init__(self, partition, fs_config): + self.partition = partition + self.fs_config = fs_config + self.ITEMS = {} + + def Get(self, name, is_dir=False): + if name not in self.ITEMS: + self.ITEMS[name] = Item(self, name, is_dir=is_dir) + return self.ITEMS[name] + + def GetMetadata(self, input_zip): + # The target_files contains a record of what the uid, + # gid, and mode are supposed to be. 
    # (tail of ItemSet.GetMetadata) Parse the filesystem_config.txt stored in
    # the target-files zip and copy ownership/mode/selabel/capabilities onto
    # the already-built Item tree.
    output = input_zip.read(self.fs_config)

    for line in output.split("\n"):
      if not line:
        continue
      columns = line.split()
      name, uid, gid, mode = columns[:4]
      selabel = None
      capabilities = None

      # After the first 4 columns, there are a series of key=value
      # pairs. Extract out the fields we care about.
      for element in columns[4:]:
        key, value = element.split("=")
        if key == "selabel":
          selabel = value
        if key == "capabilities":
          capabilities = value

      i = self.ITEMS.get(name, None)
      if i is not None:
        i.uid = int(uid)
        i.gid = int(gid)
        i.mode = int(mode, 8)  # modes in the config file are octal text
        i.selabel = selabel
        i.capabilities = capabilities
        if i.is_dir:
          i.children.sort(key=lambda i: i.name)

    # set metadata for the files generated by this script.
    i = self.ITEMS.get("system/recovery-from-boot.p", None)
    if i:
      i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o644, None, None
    i = self.ITEMS.get("system/etc/install-recovery.sh", None)
    if i:
      i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o544, None, None


class Item(object):
  """Items represent the metadata (user, group, mode) of files and
  directories in the system image."""

  def __init__(self, itemset, name, is_dir=False):
    # itemset: the owning ItemSet; name: partition-relative path ("" = root).
    # uid/gid/mode/selabel/capabilities stay None until GetMetadata fills them.
    self.itemset = itemset
    self.name = name
    self.uid = None
    self.gid = None
    self.mode = None
    self.selabel = None
    self.capabilities = None
    self.is_dir = is_dir
    self.descendants = None    # set by CountChildMetadata (dirs only)
    self.best_subtree = None   # set by CountChildMetadata (dirs only)

    if name:
      # Creating an Item implicitly creates (and links into) its parent dirs.
      self.parent = itemset.Get(os.path.dirname(name), is_dir=True)
      self.parent.children.append(self)
    else:
      self.parent = None
    if self.is_dir:
      self.children = []

  def Dump(self, indent=0):
    """Debug helper: print this item and (for dirs) its subtree metadata."""
    if self.uid is not None:
      print "%s%s %d %d %o" % (
          " " * indent, self.name, self.uid, self.gid, self.mode)
    else:
      # Metadata not yet assigned; fall back to %s so None prints cleanly.
      print "%s%s %s %s %s" % (
          " " * indent, self.name, self.uid, self.gid, self.mode)
    if self.is_dir:
      print "%s%s" % (" "*indent, self.descendants)
      print "%s%s" % (" "*indent, self.best_subtree)
      for i in self.children:
        i.Dump(indent=indent+1)

  def CountChildMetadata(self):
    """Count up the (uid, gid, mode, selabel, capabilities) tuples for
    all children and determine the best strategy for using set_perm_recursive
    and set_perm to correctly chown/chmod all the files to their desired
    values. Recursively calls itself for all descendants.

    Returns a dict of {(uid, gid, dmode, fmode, selabel, capabilities): count}
    counting up all descendants of this node. (dmode or fmode may be None.)
    Also sets the best_subtree of each directory Item to the (uid, gid, dmode,
    fmode, selabel, capabilities) tuple that will match the most descendants of
    that Item.
    """

    assert self.is_dir
    key = (self.uid, self.gid, self.mode, None, self.selabel,
           self.capabilities)
    self.descendants = {key: 1}
    d = self.descendants
    for i in self.children:
      if i.is_dir:
        for k, v in i.CountChildMetadata().iteritems():
          d[k] = d.get(k, 0) + v
      else:
        k = (i.uid, i.gid, None, i.mode, i.selabel, i.capabilities)
        d[k] = d.get(k, 0) + 1

    # Find the (uid, gid, dmode, fmode, selabel, capabilities)
    # tuple that matches the most descendants.

    # First, find the (uid, gid) pair that matches the most
    # descendants.
    ug = {}
    for (uid, gid, _, _, _, _), count in d.iteritems():
      ug[(uid, gid)] = ug.get((uid, gid), 0) + count
    ug = MostPopularKey(ug, (0, 0))

    # Now find the dmode, fmode, selabel, and capabilities that match
    # the most descendants with that (uid, gid), and choose those.
    best_dmode = (0, 0o755)
    best_fmode = (0, 0o644)
    best_selabel = (0, None)
    best_capabilities = (0, None)
    for k, count in d.iteritems():
      if k[:2] != ug:
        continue
      if k[2] is not None and count >= best_dmode[0]:
        best_dmode = (count, k[2])
      if k[3] is not None and count >= best_fmode[0]:
        best_fmode = (count, k[3])
      if k[4] is not None and count >= best_selabel[0]:
        best_selabel = (count, k[4])
      if k[5] is not None and count >= best_capabilities[0]:
        best_capabilities = (count, k[5])
    self.best_subtree = ug + (
        best_dmode[1], best_fmode[1], best_selabel[1], best_capabilities[1])

    return d

  def SetPermissions(self, script):
    """Append set_perm/set_perm_recursive commands to 'script' to
    set all permissions, users, and groups for the tree of files
    rooted at 'self'."""

    self.CountChildMetadata()

    def recurse(item, current):
      # current is the (uid, gid, dmode, fmode, selabel, capabilities) tuple
      # that the current item (and all its children) have already been set to.
      # We only need to issue set_perm/set_perm_recursive commands if we're
      # supposed to be something different.
      if item.is_dir:
        if current != item.best_subtree:
          script.SetPermissionsRecursive("/"+item.name, *item.best_subtree)
          current = item.best_subtree

        # current[2] is the dmode slot for directories.
        if item.uid != current[0] or item.gid != current[1] or \
           item.mode != current[2] or item.selabel != current[4] or \
           item.capabilities != current[5]:
          script.SetPermissions("/"+item.name, item.uid, item.gid,
                                item.mode, item.selabel, item.capabilities)

        for i in item.children:
          recurse(i, current)
      else:
        # current[3] is the fmode slot for plain files.
        if item.uid != current[0] or item.gid != current[1] or \
           item.mode != current[3] or item.selabel != current[4] or \
           item.capabilities != current[5]:
          script.SetPermissions("/"+item.name, item.uid, item.gid,
                                item.mode, item.selabel, item.capabilities)

    # Seed with impossible values so the root always emits a command.
    recurse(self, (-1, -1, -1, -1, None, None))


def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None):
  """Copies files for the partition in the input zip to the output
  zip. Populates the Item class with their metadata, and returns a
  list of symlinks. output_zip may be None, in which case the copy is
  skipped (but the other side effects still happen). substitute is an
  optional dict of {output filename: contents} to be output instead of
  certain input files.
  """

  symlinks = []

  partition = itemset.partition

  for info in input_zip.infolist():
    prefix = partition.upper() + "/"
    if info.filename.startswith(prefix):
      basefilename = info.filename[len(prefix):]
      if IsSymlink(info):
        # Symlinks are emitted as (target, linkname) pairs; the zip entry's
        # data is the link target.
        symlinks.append((input_zip.read(info.filename),
                         "/" + partition + "/" + basefilename))
      else:
        info2 = copy.copy(info)
        fn = info2.filename = partition + "/" + basefilename
        # A substitute entry of None means "drop this file entirely".
        if substitute and fn in substitute and substitute[fn] is None:
          continue
        if output_zip is not None:
          if substitute and fn in substitute:
            data = substitute[fn]
          else:
            data = input_zip.read(info.filename)
          output_zip.writestr(info2, data)
        # Populate the Item tree whether or not we copied the data.
        if fn.endswith("/"):
          itemset.Get(fn[:-1], is_dir=True)
        else:
          itemset.Get(fn)

  symlinks.sort()
  return symlinks


def SignOutput(temp_zip_name, output_zip_name):
  """Sign the package zip whole-file with OPTIONS.package_key."""
  key_passwords = common.GetKeyPasswords([OPTIONS.package_key])
  pw = key_passwords[OPTIONS.package_key]

  common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw,
                  whole_file=True)


def AppendAssertions(script, info_dict, oem_dict=None):
  """Emit device (or OEM-property) assertions into the updater script.

  Raises common.ExternalError if OEM properties are required but the OEM
  dict is missing or incomplete."""
  oem_props = info_dict.get("oem_fingerprint_properties")
  if oem_props is None or len(oem_props) == 0:
    device = GetBuildProp("ro.product.device", info_dict)
    script.AssertDevice(device)
  else:
    if oem_dict is None:
      raise common.ExternalError(
          "No OEM file provided to answer expected assertions")
    for prop in oem_props.split():
      if oem_dict.get(prop) is None:
        raise common.ExternalError(
            "The OEM file is missing the property %s" % prop)
      script.AssertOemProperty(prop, oem_dict.get(prop))


def HasRecoveryPatch(target_files_zip):
  """True if the target-files already contains a recovery-from-boot patch."""
  try:
    target_files_zip.getinfo("SYSTEM/recovery-from-boot.p")
    return True
  except KeyError:
    return False

def HasVendorPartition(target_files_zip):
  """True if the target-files contains a VENDOR/ directory entry."""
  try:
    target_files_zip.getinfo("VENDOR/")
    return True
  except KeyError:
    return False

def GetOemProperty(name, oem_props, oem_dict, info_dict):
  """Look up 'name' in the OEM dict when it is an OEM-managed property,
  otherwise fall back to the build properties."""
  if oem_props is not None and name in oem_props:
    return oem_dict[name]
  return GetBuildProp(name, info_dict)


def CalculateFingerprint(oem_props, oem_dict, info_dict):
  """Return the build fingerprint, or a synthesized
  brand/name/device:thumbprint string when OEM properties are in use."""
  if oem_props is None:
    return GetBuildProp("ro.build.fingerprint", info_dict)
  return "%s/%s/%s:%s" % (
      GetOemProperty("ro.product.brand", oem_props, oem_dict, info_dict),
      GetOemProperty("ro.product.name", oem_props, oem_dict, info_dict),
      GetOemProperty("ro.product.device", oem_props, oem_dict, info_dict),
      GetBuildProp("ro.build.thumbprint", info_dict))


def GetImage(which, tmpdir, info_dict):
  # Return an image object (suitable for passing to BlockImageDiff)
  # for the 'which' partition (must be "system" or "vendor"). If a
  # prebuilt image and file map are found in tmpdir they are used,
  # otherwise they are reconstructed from the individual files.

  assert which in ("system", "vendor")

  path = os.path.join(tmpdir, "IMAGES", which + ".img")
  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
  if os.path.exists(path) and os.path.exists(mappath):
    print "using %s.img from target-files" % (which,)
    # This is a 'new' target-files, which already has the image in it.

  else:
    print "building %s.img from target-files" % (which,)

    # This is an 'old' target-files, which does not contain images
    # already built. Build them.

    mappath = tempfile.mkstemp()[1]
    OPTIONS.tempfiles.append(mappath)

    import add_img_to_target_files
    if which == "system":
      path = add_img_to_target_files.BuildSystem(
          tmpdir, info_dict, block_list=mappath)
    elif which == "vendor":
      path = add_img_to_target_files.BuildVendor(
          tmpdir, info_dict, block_list=mappath)

  return sparse_img.SparseImage(path, mappath)


def WriteFullOTAPackage(input_zip, output_zip):
  """Generate a full OTA package (edify script + payload) from input_zip
  into output_zip, honoring OPTIONS (block-based, two-step, wipe, OEM)."""
  # TODO: how to determine this? We don't know what version it will
  # be installed on top of. For now, we expect the API just won't
  # change very often.
  script = edify_generator.EdifyGenerator(3, OPTIONS.info_dict)

  oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties")
  recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options")
  oem_dict = None
  if oem_props is not None and len(oem_props) > 0:
    if OPTIONS.oem_source is None:
      raise common.ExternalError("OEM source required for this build")
    script.Mount("/oem", recovery_mount_options)
    oem_dict = common.LoadDictionaryFromLines(
        open(OPTIONS.oem_source).readlines())

  metadata = {
      "post-build": CalculateFingerprint(oem_props, oem_dict,
                                         OPTIONS.info_dict),
      "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
                                   OPTIONS.info_dict),
      "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
  }

  device_specific = common.DeviceSpecificParams(
      input_zip=input_zip,
      input_version=OPTIONS.info_dict["recovery_api_version"],
      output_zip=output_zip,
      script=script,
      input_tmp=OPTIONS.input_tmp,
      metadata=metadata,
      info_dict=OPTIONS.info_dict)

  has_recovery_patch = HasRecoveryPatch(input_zip)
  block_based = OPTIONS.block_based and has_recovery_patch

  if not OPTIONS.omit_prereq:
    ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict)
    ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict)
    script.AssertOlderBuild(ts, ts_text)

  AppendAssertions(script, OPTIONS.info_dict, oem_dict)
  device_specific.FullOTA_Assertions()

  # Two-step package strategy (in chronological order, which is *not*
  # the order in which the generated script has things):
  #
  # if stage is not "2/3" or "3/3":
  #    write recovery image to boot partition
  #    set stage to "2/3"
  #    reboot to boot partition and restart recovery
  # else if stage is "2/3":
  #    write recovery image to recovery partition
  #    set stage to "3/3"
  #    reboot to recovery partition and restart recovery
  # else:
  #    (stage must be "3/3")
  #    set stage to ""
  #    do normal full package installation:
  #       wipe and install system, boot image, etc.
  #       set up system to update recovery partition on first boot
  #    complete script normally
  #    (allow recovery to mark itself finished and reboot)

  recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
                                         OPTIONS.input_tmp, "RECOVERY")
  if OPTIONS.two_step:
    if not OPTIONS.info_dict.get("multistage_support", None):
      assert False, "two-step packages not supported by this build"
    fs = OPTIONS.info_dict["fstab"]["/misc"]
    assert fs.fs_type.upper() == "EMMC", \
        "two-step packages only supported on devices with EMMC /misc partitions"
    bcb_dev = {"bcb_dev": fs.device}
    common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data)
    script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)
    script.WriteRawImage("/recovery", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
reboot_now("%(bcb_dev)s", "recovery");
else if get_stage("%(bcb_dev)s") == "3/3" then
""" % bcb_dev)

  device_specific.FullOTA_InstallBegin()

  # Progress-bar budget for writing the system partition; trimmed when
  # other phases (data wipe, vendor) will also need a share.
  system_progress = 0.75

  if OPTIONS.wipe_user_data:
    system_progress -= 0.1
  if HasVendorPartition(input_zip):
    system_progress -= 0.1

  if "selinux_fc" in OPTIONS.info_dict:
    WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip)

  recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options")

  system_items = ItemSet("system", "META/filesystem_config.txt")
  script.ShowProgress(system_progress, 0)

  if block_based:
    # Full OTA is done as an "incremental" against an empty source
    # image. This has the effect of writing new data from the package
    # to the entire partition, but lets us reuse the updater code that
    # writes incrementals to do it.
    system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict)
    system_tgt.ResetFileMap()
    system_diff = common.BlockDifference("system", system_tgt, src=None)
    system_diff.WriteScript(script, output_zip)
  else:
    script.FormatPartition("/system")
    script.Mount("/system", recovery_mount_options)
    if not has_recovery_patch:
      script.UnpackPackageDir("recovery", "/system")
    script.UnpackPackageDir("system", "/system")

    symlinks = CopyPartitionFiles(system_items, input_zip, output_zip)
    script.MakeSymlinks(symlinks)

  boot_img = common.GetBootableImage("boot.img", "boot.img",
                                     OPTIONS.input_tmp, "BOOT")

  if not block_based:
    def output_sink(fn, data):
      common.ZipWriteStr(output_zip, "recovery/" + fn, data)
      system_items.Get("system/" + fn)

    common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink,
                             recovery_img, boot_img)

    system_items.GetMetadata(input_zip)
    system_items.Get("system").SetPermissions(script)

  if HasVendorPartition(input_zip):
    vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
    script.ShowProgress(0.1, 0)

    if block_based:
      vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict)
      vendor_tgt.ResetFileMap()
      vendor_diff = common.BlockDifference("vendor", vendor_tgt)
      vendor_diff.WriteScript(script, output_zip)
    else:
      script.FormatPartition("/vendor")
      script.Mount("/vendor", recovery_mount_options)
      script.UnpackPackageDir("vendor", "/vendor")

      symlinks = CopyPartitionFiles(vendor_items, input_zip, output_zip)
      script.MakeSymlinks(symlinks)

      vendor_items.GetMetadata(input_zip)
      vendor_items.Get("vendor").SetPermissions(script)

  common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict)
  common.ZipWriteStr(output_zip, "boot.img", boot_img.data)

  script.ShowProgress(0.05, 5)
  script.WriteRawImage("/boot", "boot.img")

  script.ShowProgress(0.2, 10)
  device_specific.FullOTA_InstallEnd()

  if OPTIONS.extra_script is not None:
    script.AppendExtra(OPTIONS.extra_script)

  script.UnmountAll()

  if OPTIONS.wipe_user_data:
    script.ShowProgress(0.1, 10)
    script.FormatPartition("/data")

  if OPTIONS.two_step:
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
""" % bcb_dev)
    script.AppendExtra("else\n")
    script.WriteRawImage("/boot", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
endif;
endif;
""" % bcb_dev)
  script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
  WriteMetadata(metadata, output_zip)


def WritePolicyConfig(file_name, output_zip):
  """Copy the SELinux file_contexts file into the package root."""
  common.ZipWrite(output_zip, file_name, os.path.basename(file_name))


def WriteMetadata(metadata, output_zip):
  """Write the sorted key=value metadata dict to META-INF/com/android/metadata."""
  common.ZipWriteStr(output_zip, "META-INF/com/android/metadata",
                     "".join(["%s=%s\n" % kv
                              for kv in sorted(metadata.iteritems())]))


def LoadPartitionFiles(z, partition):
  """Load all the files from the given partition in a given target-files
  ZipFile, and return a dict of {filename: File object}."""
  out = {}
  prefix = partition.upper() + "/"
  for info in z.infolist():
    if info.filename.startswith(prefix) and not IsSymlink(info):
      basefilename = info.filename[len(prefix):]
      fn = partition + "/" + basefilename
      data = z.read(info.filename)
      out[fn] = common.File(fn, data)
  return out


def GetBuildProp(prop, info_dict):
  """Return the value of build property 'prop' from a target-files info_dict.

  Raises common.ExternalError if the property is not present."""
  try:
    return info_dict.get("build.prop", {})[prop]
  except KeyError:
    raise common.ExternalError("couldn't find %s in build.prop" % (prop,))


def AddToKnownPaths(filename, known_paths):
  """Add every ancestor directory of 'filename' to known_paths, stopping
  early once an already-known ancestor is reached."""
  if filename[-1] == "/":
    return
  dirs = filename.split("/")[:-1]
  while len(dirs) > 0:
    path = "/".join(dirs)
    if path in known_paths:
      break
    known_paths.add(path)
    dirs.pop()


def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
  """Generate a block-based incremental OTA package (source -> target)."""
  source_version = OPTIONS.source_info_dict["recovery_api_version"]
  target_version = OPTIONS.target_info_dict["recovery_api_version"]

  if source_version == 0:
    print ("WARNING: generating edify script for a source that "
           "can't install it.")
  script = edify_generator.EdifyGenerator(source_version,
                                          OPTIONS.target_info_dict)

  metadata = {
      "pre-device": GetBuildProp("ro.product.device",
                                 OPTIONS.source_info_dict),
      "post-timestamp": GetBuildProp("ro.build.date.utc",
                                     OPTIONS.target_info_dict),
  }

  device_specific = common.DeviceSpecificParams(
      source_zip=source_zip,
      source_version=source_version,
      target_zip=target_zip,
      target_version=target_version,
      output_zip=output_zip,
      script=script,
      metadata=metadata,
      info_dict=OPTIONS.info_dict)

  source_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.source_info_dict)
  target_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.target_info_dict)
  metadata["pre-build"] = source_fp
  metadata["post-build"] = target_fp

  source_boot = common.GetBootableImage(
      "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT",
      OPTIONS.source_info_dict)
  target_boot = common.GetBootableImage(
      "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT")
  updating_boot = (not OPTIONS.two_step and
                   (source_boot.data != target_boot.data))

  target_recovery = common.GetBootableImage(
      "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")

  system_src = GetImage("system", OPTIONS.source_tmp, OPTIONS.source_info_dict)
  system_tgt = GetImage("system", OPTIONS.target_tmp, OPTIONS.target_info_dict)

  blockimgdiff_version = 1
  if OPTIONS.info_dict:
    blockimgdiff_version = max(
        int(i) for i in
        OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))

  system_diff = common.BlockDifference("system", system_tgt, system_src,
                                       check_first_block=True,
                                       version=blockimgdiff_version)

  if HasVendorPartition(target_zip):
    if not HasVendorPartition(source_zip):
      raise RuntimeError("can't generate incremental that adds /vendor")
    vendor_src = GetImage("vendor", OPTIONS.source_tmp,
                          OPTIONS.source_info_dict)
    vendor_tgt = GetImage("vendor", OPTIONS.target_tmp,
                          OPTIONS.target_info_dict)
    vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
                                         check_first_block=True,
                                         version=blockimgdiff_version)
  else:
    vendor_diff = None

  oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties")
  recovery_mount_options = OPTIONS.target_info_dict.get(
      "recovery_mount_options")
  oem_dict = None
  if oem_props is not None and len(oem_props) > 0:
    if OPTIONS.oem_source is None:
      raise common.ExternalError("OEM source required for this build")
    script.Mount("/oem", recovery_mount_options)
    oem_dict = common.LoadDictionaryFromLines(
        open(OPTIONS.oem_source).readlines())

  AppendAssertions(script, OPTIONS.target_info_dict, oem_dict)
  device_specific.IncrementalOTA_Assertions()

  # Two-step incremental package strategy (in chronological order,
  # which is *not* the order in which the generated script has
  # things):
  #
  # if stage is not "2/3" or "3/3":
  #    do verification on current system
  #    write recovery image to boot partition
  #    set stage to "2/3"
  #    reboot to boot partition and restart recovery
  # else if stage is "2/3":
  #    write recovery image to recovery partition
  #    set stage to "3/3"
  #    reboot to recovery partition and restart recovery
  # else:
  #    (stage must be "3/3")
  #    perform update:
  #       patch system files, etc.
  #       force full install of new boot image
  #       set up system to update recovery partition on first boot
  #    complete script normally
  #    (allow recovery to mark itself finished and reboot)

  if OPTIONS.two_step:
    if not OPTIONS.info_dict.get("multistage_support", None):
      assert False, "two-step packages not supported by this build"
    fs = OPTIONS.info_dict["fstab"]["/misc"]
    assert fs.fs_type.upper() == "EMMC", \
        "two-step packages only supported on devices with EMMC /misc partitions"
    bcb_dev = {"bcb_dev": fs.device}
    common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
    script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)
    script.AppendExtra("sleep(20);\n")
    script.WriteRawImage("/recovery", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
reboot_now("%(bcb_dev)s", "recovery");
else if get_stage("%(bcb_dev)s") != "3/3" then
""" % bcb_dev)

  script.Print("Verifying current system...")

  device_specific.IncrementalOTA_VerifyBegin()

  if oem_props is None:
    # When blockimgdiff version is less than 3 (non-resumable block-based OTA),
    # patching on a device that's already on the target build will damage the
    # system. Because operations like move don't check the block state, they
    # always apply the changes unconditionally.
    if blockimgdiff_version <= 2:
      script.AssertSomeFingerprint(source_fp)
    else:
      script.AssertSomeFingerprint(source_fp, target_fp)
  else:
    if blockimgdiff_version <= 2:
      script.AssertSomeThumbprint(
          GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
    else:
      script.AssertSomeThumbprint(
          GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
          GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))

  if updating_boot:
    boot_type, boot_device = common.GetTypeAndDevice("/boot", OPTIONS.info_dict)
    d = common.Difference(target_boot, source_boot)
    _, _, d = d.ComputePatch()
    if d is None:
      # No usable patch; ship the full target boot image instead.
      include_full_boot = True
      common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
    else:
      include_full_boot = False

      print "boot target: %d source: %d diff: %d" % (
          target_boot.size, source_boot.size, len(d))

      common.ZipWriteStr(output_zip, "patch/boot.img.p", d)

      script.PatchCheck("%s:%s:%d:%s:%d:%s" %
                        (boot_type, boot_device,
                         source_boot.size, source_boot.sha1,
                         target_boot.size, target_boot.sha1))

  device_specific.IncrementalOTA_VerifyEnd()

  if OPTIONS.two_step:
    script.WriteRawImage("/boot", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
else
""" % bcb_dev)

  # Verify the existing partitions.
  system_diff.WriteVerifyScript(script)
  if vendor_diff:
    vendor_diff.WriteVerifyScript(script)

  script.Comment("---- start making changes here ----")

  device_specific.IncrementalOTA_InstallBegin()

  system_diff.WriteScript(script, output_zip,
                          progress=0.8 if vendor_diff else 0.9)
  if vendor_diff:
    vendor_diff.WriteScript(script, output_zip, progress=0.1)

  if OPTIONS.two_step:
    common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
    script.WriteRawImage("/boot", "boot.img")
    print "writing full boot image (forced by two-step mode)"

  if not OPTIONS.two_step:
    if updating_boot:
      if include_full_boot:
        print "boot image changed; including full."
        script.Print("Installing boot image...")
        script.WriteRawImage("/boot", "boot.img")
      else:
        # Produce the boot image by applying a patch to the current
        # contents of the boot partition, and write it back to the
        # partition.
        print "boot image changed; including patch."
        script.Print("Patching boot image...")
        script.ShowProgress(0.1, 10)
        script.ApplyPatch("%s:%s:%d:%s:%d:%s"
                          % (boot_type, boot_device,
                             source_boot.size, source_boot.sha1,
                             target_boot.size, target_boot.sha1),
                          "-",
                          target_boot.size, target_boot.sha1,
                          source_boot.sha1, "patch/boot.img.p")
    else:
      print "boot image unchanged; skipping."

  # Do device-specific installation (eg, write radio image).
  device_specific.IncrementalOTA_InstallEnd()

  if OPTIONS.extra_script is not None:
    script.AppendExtra(OPTIONS.extra_script)

  if OPTIONS.wipe_user_data:
    script.Print("Erasing user data...")
    script.FormatPartition("/data")

  if OPTIONS.two_step:
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
endif;
endif;
""" % bcb_dev)

  script.SetProgress(1)
  script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
  WriteMetadata(metadata, output_zip)


class FileDifference(object):
  """Computes the file-by-file difference between two target-files zips for
  one partition: what to send verbatim, what to patch, and what was renamed."""

  def __init__(self, partition, source_zip, target_zip, output_zip):
    self.deferred_patch_list = None
    print "Loading target..."
    self.target_data = target_data = LoadPartitionFiles(target_zip, partition)
    print "Loading source..."
    self.source_data = source_data = LoadPartitionFiles(source_zip, partition)

    self.verbatim_targets = verbatim_targets = []
    self.patch_list = patch_list = []
    diffs = []
    self.renames = renames = {}
    known_paths = set()
    largest_source_size = 0

    # Index source files three ways (exact path, basename, sha1) so renamed
    # files can still be matched to a patch source.
    matching_file_cache = {}
    for fn, sf in source_data.items():
      assert fn == sf.name
      matching_file_cache["path:" + fn] = sf
      if fn in target_data.keys():
        AddToKnownPaths(fn, known_paths)
      # Only allow eligibility for filename/sha matching
      # if there isn't a perfect path match.
      if target_data.get(sf.name) is None:
        matching_file_cache["file:" + fn.split("/")[-1]] = sf
        matching_file_cache["sha:" + sf.sha1] = sf

    for fn in sorted(target_data.keys()):
      tf = target_data[fn]
      assert fn == tf.name
      sf = ClosestFileMatch(tf, matching_file_cache, renames)
      if sf is not None and sf.name != tf.name:
        print "File has moved from " + sf.name + " to " + tf.name
        renames[sf.name] = tf

      if sf is None or fn in OPTIONS.require_verbatim:
        # This file should be included verbatim
        if fn in OPTIONS.prohibit_verbatim:
          raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
        print "send", fn, "verbatim"
        tf.AddToZip(output_zip)
        verbatim_targets.append((fn, tf.size, tf.sha1))
        if fn in target_data.keys():
          AddToKnownPaths(fn, known_paths)
      elif tf.sha1 != sf.sha1:
        # File is different; consider sending as a patch
        diffs.append(common.Difference(tf, sf))
      else:
        # Target file data identical to source (may still be renamed)
        pass

    common.ComputeDifferences(diffs)

    for diff in diffs:
      tf, sf, d = diff.GetPatch()
      path = "/".join(tf.name.split("/")[:-1])
      if d is None or len(d) > tf.size * OPTIONS.patch_threshold or \
          path not in known_paths:
        # patch is almost as big as the file; don't bother patching
        # or a patch + rename cannot take place due to the target
        # directory not existing
        tf.AddToZip(output_zip)
        verbatim_targets.append((tf.name, tf.size, tf.sha1))
        if sf.name in renames:
          del renames[sf.name]
        AddToKnownPaths(tf.name, known_paths)
      else:
        common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d)
        patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest()))
        largest_source_size = max(largest_source_size, sf.size)

    self.largest_source_size = largest_source_size

  def EmitVerification(self, script):
    """Emit PatchCheck calls for every patched file; returns bytes checked."""
    so_far = 0
    for tf, sf, _, _ in self.patch_list:
      if tf.name != sf.name:
        script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
      script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1)
      so_far += sf.size
    return so_far

  def EmitExplicitTargetVerification(self, script):
    """Emit FileCheck calls verifying final target contents."""
    for fn, _, sha1 in self.verbatim_targets:
      if fn[-1] != "/":
        script.FileCheck("/"+fn, sha1)
    for tf, _, _, _ in self.patch_list:
      script.FileCheck(tf.name, tf.sha1)

  def RemoveUnneededFiles(self, script, extras=()):
    """Delete files replaced verbatim, files gone from the target, and any
    extra paths the caller supplies."""
    script.DeleteFiles(
        ["/" + i[0] for i in self.verbatim_targets] +
        ["/" + i for i in sorted(self.source_data)
         if i not in self.target_data and i not in self.renames] +
        list(extras))

  def TotalPatchSize(self):
    """Sum of source-file sizes across all queued patches."""
    return sum(i[1].size for i in self.patch_list)

  def EmitPatches(self, script, total_patch_size, so_far):
    """Emit ApplyPatch calls; build.prop is deferred so it is patched last."""
    self.deferred_patch_list = deferred_patch_list = []
    for item in self.patch_list:
      tf, sf, _, _ = item
      if tf.name == "system/build.prop":
        deferred_patch_list.append(item)
        continue
      if sf.name != tf.name:
        script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
      script.ApplyPatch("/" + sf.name, "-", tf.size, tf.sha1, sf.sha1,
                        "patch/" + sf.name + ".p")
      so_far += tf.size
      script.SetProgress(so_far / total_patch_size)
    return so_far

  def EmitDeferredPatches(self, script):
    """Apply the deferred patches (build.prop) and restore its permissions."""
    for item in self.deferred_patch_list:
      tf, sf, _, _ = item
      script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1,
                        "patch/" + sf.name + ".p")
    script.SetPermissions("/system/build.prop", 0, 0, 0o644, None, None)

  def EmitRenames(self, script):
    """Emit RenameFile commands for files that moved between builds."""
    if len(self.renames) > 0:
      script.Print("Renaming files...")
      for src, tgt in self.renames.iteritems():
        print "Renaming " + src + " to " + tgt.name
        script.RenameFile(src, tgt.name)


def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
  """Generate a file-based incremental OTA package; delegates to the
  block-based generator when both builds support it."""
  target_has_recovery_patch = HasRecoveryPatch(target_zip)
  source_has_recovery_patch = HasRecoveryPatch(source_zip)

  if (OPTIONS.block_based and
      target_has_recovery_patch and
      source_has_recovery_patch):
    return WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip)

  source_version = OPTIONS.source_info_dict["recovery_api_version"]
  target_version = OPTIONS.target_info_dict["recovery_api_version"]

  if source_version == 0:
    print ("WARNING: generating edify script for a source that "
           "can't install it.")
  script = edify_generator.EdifyGenerator(source_version,
                                          OPTIONS.target_info_dict)

  oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties")
  recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options")
  oem_dict = None
  if oem_props is not None and len(oem_props) > 0:
    if OPTIONS.oem_source is None:
      raise common.ExternalError("OEM source required for this build")
    script.Mount("/oem", recovery_mount_options)
    oem_dict = common.LoadDictionaryFromLines(
        open(OPTIONS.oem_source).readlines())

  metadata = {
      "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
                                   OPTIONS.source_info_dict),
      "post-timestamp": GetBuildProp("ro.build.date.utc",
                                     OPTIONS.target_info_dict),
  }

  device_specific = common.DeviceSpecificParams(
      source_zip=source_zip,
      source_version=source_version,
      target_zip=target_zip,
      target_version=target_version,
      output_zip=output_zip,
      script=script,
      metadata=metadata,
      info_dict=OPTIONS.info_dict)

  system_diff = FileDifference("system", source_zip, target_zip, output_zip)
  script.Mount("/system", recovery_mount_options)
  if HasVendorPartition(target_zip):
    vendor_diff = FileDifference("vendor", source_zip, target_zip, output_zip)
    script.Mount("/vendor", recovery_mount_options)
  else:
    vendor_diff = None

  target_fp = CalculateFingerprint(oem_props, oem_dict,
                                   OPTIONS.target_info_dict)
  source_fp = CalculateFingerprint(oem_props, oem_dict,
                                   OPTIONS.source_info_dict)

  if oem_props is None:
    script.AssertSomeFingerprint(source_fp, target_fp)
  else:
    script.AssertSomeThumbprint(
        GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
        GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))

  metadata["pre-build"] = source_fp
  metadata["post-build"] = target_fp

  source_boot = common.GetBootableImage(
      "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT",
      OPTIONS.source_info_dict)
  target_boot = common.GetBootableImage(
      "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT")
  updating_boot = (not OPTIONS.two_step and
                   (source_boot.data != target_boot.data))

  source_recovery = common.GetBootableImage(
      "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY",
      OPTIONS.source_info_dict)
  target_recovery = common.GetBootableImage(
      "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
  updating_recovery = (source_recovery.data != target_recovery.data)

  # Here's how we divide up the progress bar:
  #  0.1 for verifying the start state (PatchCheck calls)
  #  0.8 for applying patches (ApplyPatch calls)
  #  0.1 for unpacking verbatim files, symlinking, and doing the
  #      device-specific commands.

  AppendAssertions(script, OPTIONS.target_info_dict, oem_dict)
  device_specific.IncrementalOTA_Assertions()

  # Two-step incremental package strategy (in chronological order,
  # which is *not* the order in which the generated script has
  # things):
  #
  # if stage is not "2/3" or "3/3":
  #    do verification on current system
  #    write recovery image to boot partition
  #    set stage to "2/3"
  #    reboot to boot partition and restart recovery
  # else if stage is "2/3":
  #    write recovery image to recovery partition
  #    set stage to "3/3"
  #    reboot to recovery partition and restart recovery
  # else:
  #    (stage must be "3/3")
  #    perform update:
  #       patch system files, etc.
  #       force full install of new boot image
  #       set up system to update recovery partition on first boot
  #    complete script normally
  #    (allow recovery to mark itself finished and reboot)

  if OPTIONS.two_step:
    if not OPTIONS.info_dict.get("multistage_support", None):
      assert False, "two-step packages not supported by this build"
    fs = OPTIONS.info_dict["fstab"]["/misc"]
    assert fs.fs_type.upper() == "EMMC", \
        "two-step packages only supported on devices with EMMC /misc partitions"
    bcb_dev = {"bcb_dev": fs.device}
    common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
    script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)
    script.AppendExtra("sleep(20);\n")
    script.WriteRawImage("/recovery", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
reboot_now("%(bcb_dev)s", "recovery");
else if get_stage("%(bcb_dev)s") != "3/3" then
""" % bcb_dev)

  script.Print("Verifying current system...")

  device_specific.IncrementalOTA_VerifyBegin()

  script.ShowProgress(0.1, 0)
  so_far = system_diff.EmitVerification(script)
  if vendor_diff:
    so_far += vendor_diff.EmitVerification(script)

  if updating_boot:
    d = common.Difference(target_boot, source_boot)
    _, _, d = d.ComputePatch()
    print "boot target: %d source: %d diff: %d" % (
        target_boot.size, source_boot.size, len(d))

    common.ZipWriteStr(output_zip, "patch/boot.img.p", d)

    boot_type, boot_device = common.GetTypeAndDevice("/boot", OPTIONS.info_dict)

    script.PatchCheck("%s:%s:%d:%s:%d:%s" %
                      (boot_type, boot_device,
                       source_boot.size, source_boot.sha1,
                       target_boot.size, target_boot.sha1))
    so_far += source_boot.size

  size = []
  if system_diff.patch_list:
    size.append(system_diff.largest_source_size)
  if vendor_diff:
    if vendor_diff.patch_list:
      size.append(vendor_diff.largest_source_size)
  if size or updating_recovery or updating_boot:
    # NOTE(review): max(size) raises on an empty list when only
    # updating_recovery/updating_boot triggered this branch — confirm
    # whether upstream guards this case.
    script.CacheFreeSpaceCheck(max(size))

  device_specific.IncrementalOTA_VerifyEnd()

  if OPTIONS.two_step:
    script.WriteRawImage("/boot", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
else
""" % bcb_dev)

  script.Comment("---- start making changes here ----")

  device_specific.IncrementalOTA_InstallBegin()

  if OPTIONS.two_step:
    common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
    script.WriteRawImage("/boot", "boot.img")
    print "writing full boot image (forced by two-step mode)"

  script.Print("Removing unneeded files...")
  system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",))
  if vendor_diff:
    vendor_diff.RemoveUnneededFiles(script)

  script.ShowProgress(0.8, 0)
  # Start at 1.0 so total_patch_size is never zero (avoids division by zero
  # in the SetProgress ratio below).
  total_patch_size = 1.0 + system_diff.TotalPatchSize()
  if vendor_diff:
    total_patch_size += vendor_diff.TotalPatchSize()
  if updating_boot:
    total_patch_size += target_boot.size

  script.Print("Patching system files...")
  so_far = system_diff.EmitPatches(script, total_patch_size, 0)
  if vendor_diff:
    script.Print("Patching vendor files...")
    so_far = vendor_diff.EmitPatches(script, total_patch_size, so_far)

  if not OPTIONS.two_step:
    if updating_boot:
      # Produce the boot image by applying a patch to the current
      # contents of the boot partition, and write it back to the
      # partition.
      script.Print("Patching boot image...")
      script.ApplyPatch("%s:%s:%d:%s:%d:%s"
                        % (boot_type, boot_device,
                           source_boot.size, source_boot.sha1,
                           target_boot.size, target_boot.sha1),
                        "-",
                        target_boot.size, target_boot.sha1,
                        source_boot.sha1, "patch/boot.img.p")
      so_far += target_boot.size
      script.SetProgress(so_far / total_patch_size)
      print "boot image changed; including."
    else:
      print "boot image unchanged; skipping."

  system_items = ItemSet("system", "META/filesystem_config.txt")
  if vendor_diff:
    vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")

  if updating_recovery:
    # Recovery is generated as a patch using both the boot image
    # (which contains the same linux kernel as recovery) and the file
    # /system/etc/recovery-resource.dat (which contains all the images
    # used in the recovery UI) as sources. This lets us minimize the
    # size of the patch, which must be included in every OTA package.
    #
    # For older builds where recovery-resource.dat is not present, we
    # use only the boot image as the source.

    if not target_has_recovery_patch:
      def output_sink(fn, data):
        common.ZipWriteStr(output_zip, "recovery/" + fn, data)
        system_items.Get("system/" + fn)

      common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink,
                               target_recovery, target_boot)
      script.DeleteFiles(["/system/recovery-from-boot.p",
                          "/system/etc/install-recovery.sh"])
    print "recovery image changed; including as patch from boot."
  else:
    print "recovery image unchanged; skipping."

  script.ShowProgress(0.1, 10)

  target_symlinks = CopyPartitionFiles(system_items, target_zip, None)
  if vendor_diff:
    target_symlinks.extend(CopyPartitionFiles(vendor_items, target_zip, None))

  temp_script = script.MakeTemporary()
  system_items.GetMetadata(target_zip)
  system_items.Get("system").SetPermissions(temp_script)
  if vendor_diff:
    vendor_items.GetMetadata(target_zip)
    vendor_items.Get("vendor").SetPermissions(temp_script)

  # Note that this call will mess up the trees of Items, so make sure
  # we're done with them.
+ source_symlinks = CopyPartitionFiles(system_items, source_zip, None) + if vendor_diff: + source_symlinks.extend(CopyPartitionFiles(vendor_items, source_zip, None)) + + target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks]) + source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks]) + + # Delete all the symlinks in source that aren't in target. This + # needs to happen before verbatim files are unpacked, in case a + # symlink in the source is replaced by a real file in the target. + to_delete = [] + for dest, link in source_symlinks: + if link not in target_symlinks_d: + to_delete.append(link) + script.DeleteFiles(to_delete) + + if system_diff.verbatim_targets: + script.Print("Unpacking new system files...") + script.UnpackPackageDir("system", "/system") + if vendor_diff and vendor_diff.verbatim_targets: + script.Print("Unpacking new vendor files...") + script.UnpackPackageDir("vendor", "/vendor") + + if updating_recovery and not target_has_recovery_patch: + script.Print("Unpacking new recovery...") + script.UnpackPackageDir("recovery", "/system") + + system_diff.EmitRenames(script) + if vendor_diff: + vendor_diff.EmitRenames(script) + + script.Print("Symlinks and permissions...") + + # Create all the symlinks that don't already exist, or point to + # somewhere different than what we want. Delete each symlink before + # creating it, since the 'symlink' command won't overwrite. + to_create = [] + for dest, link in target_symlinks: + if link in source_symlinks_d: + if dest != source_symlinks_d[link]: + to_create.append((dest, link)) + else: + to_create.append((dest, link)) + script.DeleteFiles([i[1] for i in to_create]) + script.MakeSymlinks(to_create) + + # Now that the symlinks are created, we can set all the + # permissions. + script.AppendScript(temp_script) + + # Do device-specific installation (eg, write radio image). 
+ device_specific.IncrementalOTA_InstallEnd() + + if OPTIONS.extra_script is not None: + script.AppendExtra(OPTIONS.extra_script) + + # Patch the build.prop file last, so if something fails but the + # device can still come up, it appears to be the old build and will + # get set the OTA package again to retry. + script.Print("Patching remaining system files...") + system_diff.EmitDeferredPatches(script) + + if OPTIONS.wipe_user_data: + script.Print("Erasing user data...") + script.FormatPartition("/data") + + if OPTIONS.two_step: + script.AppendExtra(""" +set_stage("%(bcb_dev)s", ""); +endif; +endif; +""" % bcb_dev) + + if OPTIONS.verify and system_diff: + script.Print("Remounting and verifying system partition files...") + script.Unmount("/system") + script.Mount("/system") + system_diff.EmitExplicitTargetVerification(script) + + if OPTIONS.verify and vendor_diff: + script.Print("Remounting and verifying vendor partition files...") + script.Unmount("/vendor") + script.Mount("/vendor") + vendor_diff.EmitExplicitTargetVerification(script) + script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary) + + WriteMetadata(metadata, output_zip) + + +def main(argv): + + def option_handler(o, a): + if o == "--board_config": + pass # deprecated + elif o in ("-k", "--package_key"): + OPTIONS.package_key = a + elif o in ("-i", "--incremental_from"): + OPTIONS.incremental_source = a + elif o in ("-w", "--wipe_user_data"): + OPTIONS.wipe_user_data = True + elif o in ("-n", "--no_prereq"): + OPTIONS.omit_prereq = True + elif o in ("-o", "--oem_settings"): + OPTIONS.oem_source = a + elif o in ("-e", "--extra_script"): + OPTIONS.extra_script = a + elif o in ("-a", "--aslr_mode"): + if a in ("on", "On", "true", "True", "yes", "Yes"): + OPTIONS.aslr_mode = True + else: + OPTIONS.aslr_mode = False + elif o in ("-t", "--worker_threads"): + if a.isdigit(): + OPTIONS.worker_threads = int(a) + else: + raise ValueError("Cannot parse value %r for option %r - only " + 
"integers are allowed." % (a, o)) + elif o in ("-2", "--two_step"): + OPTIONS.two_step = True + elif o == "--no_signing": + OPTIONS.no_signing = True + elif o == "--verify": + OPTIONS.verify = True + elif o == "--block": + OPTIONS.block_based = True + elif o in ("-b", "--binary"): + OPTIONS.updater_binary = a + elif o in ("--no_fallback_to_full",): + OPTIONS.fallback_to_full = False + else: + return False + return True + + args = common.ParseOptions(argv, __doc__, + extra_opts="b:k:i:d:wne:t:a:2o:", + extra_long_opts=[ + "board_config=", + "package_key=", + "incremental_from=", + "wipe_user_data", + "no_prereq", + "extra_script=", + "worker_threads=", + "aslr_mode=", + "two_step", + "no_signing", + "block", + "binary=", + "oem_settings=", + "verify", + "no_fallback_to_full", + ], extra_option_handler=option_handler) + + if len(args) != 2: + common.Usage(__doc__) + sys.exit(1) + + if OPTIONS.extra_script is not None: + OPTIONS.extra_script = open(OPTIONS.extra_script).read() + + print "unzipping target target-files..." + OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0]) + + OPTIONS.target_tmp = OPTIONS.input_tmp + OPTIONS.info_dict = common.LoadInfoDict(input_zip) + + # If this image was originally labelled with SELinux contexts, make sure we + # also apply the labels in our new image. During building, the "file_contexts" + # is in the out/ directory tree, but for repacking from target-files.zip it's + # in the root directory of the ramdisk. + if "selinux_fc" in OPTIONS.info_dict: + OPTIONS.info_dict["selinux_fc"] = os.path.join( + OPTIONS.input_tmp, "BOOT", "RAMDISK", "file_contexts") + + if OPTIONS.verbose: + print "--- target info ---" + common.DumpInfoDict(OPTIONS.info_dict) + + # If the caller explicitly specified the device-specific extensions + # path via -s/--device_specific, use that. Otherwise, use + # META/releasetools.py if it is present in the target target_files. 
+ # Otherwise, take the path of the file from 'tool_extensions' in the + # info dict and look for that in the local filesystem, relative to + # the current directory. + + if OPTIONS.device_specific is None: + from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py") + if os.path.exists(from_input): + print "(using device-specific extensions from target_files)" + OPTIONS.device_specific = from_input + else: + OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions", None) + + if OPTIONS.device_specific is not None: + OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific) + + while True: + + if OPTIONS.no_signing: + if os.path.exists(args[1]): + os.unlink(args[1]) + output_zip = zipfile.ZipFile(args[1], "w", + compression=zipfile.ZIP_DEFLATED) + else: + temp_zip_file = tempfile.NamedTemporaryFile() + output_zip = zipfile.ZipFile(temp_zip_file, "w", + compression=zipfile.ZIP_DEFLATED) + + if OPTIONS.incremental_source is None: + WriteFullOTAPackage(input_zip, output_zip) + if OPTIONS.package_key is None: + OPTIONS.package_key = OPTIONS.info_dict.get( + "default_system_dev_certificate", + "build/target/product/security/testkey") + break + + else: + print "unzipping source target-files..." 
+ OPTIONS.source_tmp, source_zip = common.UnzipTemp( + OPTIONS.incremental_source) + OPTIONS.target_info_dict = OPTIONS.info_dict + OPTIONS.source_info_dict = common.LoadInfoDict(source_zip) + if "selinux_fc" in OPTIONS.source_info_dict: + OPTIONS.source_info_dict["selinux_fc"] = os.path.join( + OPTIONS.source_tmp, "BOOT", "RAMDISK", "file_contexts") + if OPTIONS.package_key is None: + OPTIONS.package_key = OPTIONS.source_info_dict.get( + "default_system_dev_certificate", + "build/target/product/security/testkey") + if OPTIONS.verbose: + print "--- source info ---" + common.DumpInfoDict(OPTIONS.source_info_dict) + try: + WriteIncrementalOTAPackage(input_zip, source_zip, output_zip) + break + except ValueError: + if not OPTIONS.fallback_to_full: + raise + print "--- failed to build incremental; falling back to full ---" + OPTIONS.incremental_source = None + output_zip.close() + + output_zip.close() + + if not OPTIONS.no_signing: + SignOutput(temp_zip_file.name, args[1]) + temp_zip_file.close() + + print "done." + + +if __name__ == '__main__': + try: + common.CloseInheritedPipes() + main(sys.argv[1:]) + except common.ExternalError as e: + print + print " ERROR: %s" % (e,) + print + sys.exit(1) + finally: + common.Cleanup() diff --git a/tools/releasetools/pylintrc b/tools/releasetools/pylintrc new file mode 100644 index 0000000000..90de1afbc8 --- /dev/null +++ b/tools/releasetools/pylintrc @@ -0,0 +1,382 @@ +[MASTER] + +# Specify a configuration file. +#rcfile= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Profiled execution. +profile=no + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Pickle collected data for later comparisons. +persistent=yes + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Use multiple processes to speed up Pylint. 
+jobs=1 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +extension-pkg-whitelist= + +# Allow optimization of some AST trees. This will activate a peephole AST +# optimizer, which will apply various small optimizations. For instance, it can +# be used to obtain the result of joining multiple strings with the addition +# operator. Joining a lot of strings can lead to a maximum recursion error in +# Pylint and this flag can prevent that. It has one side effect, the resulting +# AST will be different than the one from reality. +optimize-ast=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time. See also the "--disable" option for examples. +#enable= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". 
If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=invalid-name,missing-docstring,too-many-branches,too-many-locals,too-many-arguments,too-many-statements,duplicate-code,too-few-public-methods,too-many-instance-attributes,too-many-lines,too-many-public-methods,locally-disabled,fixme + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". +files-output=no + +# Tells whether to display a full report or only the messages +reports=yes + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Add a comment according to your evaluation note. This is used by the global +# evaluation report (RP0004). +comment=no + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. 
+ignore-imports=no + + +[TYPECHECK] + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis +ignored-modules= + +# List of classes names for which member attributes should not be checked +# (useful for classes with attributes dynamically set). +ignored-classes=SQLObject + +# When zope mode is activated, add a predefined set of Zope acquired attributes +# to generated-members. +zope=no + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E0201 when accessed. Python regular +# expressions are accepted. +generated-members=REQUEST,acl_users,aq_parent + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO + + +[BASIC] + +# Required attributes for module, separated by a comma +required-attributes= + +# List of builtins function names that should not be used, separated by a comma +bad-functions=map,filter,input + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. 
+name-group= + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no + +# Regular expression matching correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for function names +function-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for variable names +variable-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct constant names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Naming hint for constant names +const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression matching correct attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for attribute names +attr-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for argument names +argument-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct class attribute names +class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Naming hint for class attribute names +class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Regular expression matching correct inline iteration names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Naming hint for inline iteration names +inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ + +# Regular expression matching correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Naming hint for class names +class-name-hint=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression matching correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Naming hint for module names +module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression matching correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for method names +method-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match function or class names that do +# 
not require a docstring. +no-docstring-rgx=__.*__ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + + +[SPELLING] + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=80 + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + +# List of optional constructs for which whitespace checking is disabled +no-space-check=trailing-comma,dict-separator + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format=LF + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=_$|dummy + +# List of additional names supposed to be defined in builtins. 
Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=5 + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + + +[CLASSES] + +# List of interface methods to ignore, separated by a comma. This is used for +# instance to not check methods defines in Zope's Interface base class. 
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=Exception diff --git a/tools/releasetools/rangelib.py b/tools/releasetools/rangelib.py index 7279c60a28..8b327fe357 100644 --- a/tools/releasetools/rangelib.py +++ b/tools/releasetools/rangelib.py @@ -24,6 +24,7 @@ class RangeSet(object): lots of runs.""" def __init__(self, data=None): + self.monotonic = False if isinstance(data, str): self._parse_internal(data) elif data: @@ -185,7 +186,7 @@ class RangeSet(object): # This is like intersect, but we can stop as soon as we discover the # output is going to be nonempty. 
z = 0 - for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))), + for _, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))), zip(other.data, itertools.cycle((+1, -1)))): if (z == 1 and d == 1) or (z == 2 and d == -1): return True diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks deleted file mode 100755 index 9e6105101b..0000000000 --- a/tools/releasetools/sign_target_files_apks +++ /dev/null @@ -1,502 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (C) 2008 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Signs all the APK files in a target-files zipfile, producing a new -target-files zip. - -Usage: sign_target_files_apks [flags] input_target_files output_target_files - - -e (--extra_apks) - Add extra APK name/key pairs as though they appeared in - apkcerts.txt (so mappings specified by -k and -d are applied). - Keys specified in -e override any value for that app contained - in the apkcerts.txt file. Option may be repeated to give - multiple extra packages. - - -k (--key_mapping) - Add a mapping from the key name as specified in apkcerts.txt (the - src_key) to the real key you wish to sign the package with - (dest_key). Option may be repeated to give multiple key - mappings. 
- - -d (--default_key_mappings) - Set up the following key mappings: - - $devkey/devkey ==> $dir/releasekey - $devkey/testkey ==> $dir/releasekey - $devkey/media ==> $dir/media - $devkey/shared ==> $dir/shared - $devkey/platform ==> $dir/platform - - where $devkey is the directory part of the value of - default_system_dev_certificate from the input target-files's - META/misc_info.txt. (Defaulting to "build/target/product/security" - if the value is not present in misc_info. - - -d and -k options are added to the set of mappings in the order - in which they appear on the command line. - - -o (--replace_ota_keys) - Replace the certificate (public key) used by OTA package - verification with the one specified in the input target_files - zip (in the META/otakeys.txt file). Key remapping (-k and -d) - is performed on this key. - - -t (--tag_changes) <+tag>,<-tag>,... - Comma-separated list of changes to make to the set of tags (in - the last component of the build fingerprint). Prefix each with - '+' or '-' to indicate whether that tag should be added or - removed. Changes are processed in the order they appear. - Default value is "-test-keys,-dev-keys,+release-keys". - -""" - -import sys - -if sys.hexversion < 0x02070000: - print >> sys.stderr, "Python 2.7 or newer is required." 
- sys.exit(1) - -import base64 -import cStringIO -import copy -import errno -import os -import re -import shutil -import subprocess -import tempfile -import zipfile - -import add_img_to_target_files -import common - -OPTIONS = common.OPTIONS - -OPTIONS.extra_apks = {} -OPTIONS.key_map = {} -OPTIONS.replace_ota_keys = False -OPTIONS.replace_verity_public_key = False -OPTIONS.replace_verity_private_key = False -OPTIONS.tag_changes = ("-test-keys", "-dev-keys", "+release-keys") - -def GetApkCerts(tf_zip): - certmap = common.ReadApkCerts(tf_zip) - - # apply the key remapping to the contents of the file - for apk, cert in certmap.iteritems(): - certmap[apk] = OPTIONS.key_map.get(cert, cert) - - # apply all the -e options, overriding anything in the file - for apk, cert in OPTIONS.extra_apks.iteritems(): - if not cert: - cert = "PRESIGNED" - certmap[apk] = OPTIONS.key_map.get(cert, cert) - - return certmap - - -def CheckAllApksSigned(input_tf_zip, apk_key_map): - """Check that all the APKs we want to sign have keys specified, and - error out if they don't.""" - unknown_apks = [] - for info in input_tf_zip.infolist(): - if info.filename.endswith(".apk"): - name = os.path.basename(info.filename) - if name not in apk_key_map: - unknown_apks.append(name) - if unknown_apks: - print "ERROR: no key specified for:\n\n ", - print "\n ".join(unknown_apks) - print "\nUse '-e =' to specify a key (which may be an" - print "empty string to not sign this apk)." 
- sys.exit(1) - - -def SignApk(data, keyname, pw): - unsigned = tempfile.NamedTemporaryFile() - unsigned.write(data) - unsigned.flush() - - signed = tempfile.NamedTemporaryFile() - - common.SignFile(unsigned.name, signed.name, keyname, pw, align=4) - - data = signed.read() - unsigned.close() - signed.close() - - return data - - -def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info, - apk_key_map, key_passwords): - - maxsize = max([len(os.path.basename(i.filename)) - for i in input_tf_zip.infolist() - if i.filename.endswith('.apk')]) - rebuild_recovery = False - - tmpdir = tempfile.mkdtemp() - def write_to_temp(fn, attr, data): - fn = os.path.join(tmpdir, fn) - if fn.endswith("/"): - fn = os.path.join(tmpdir, fn) - os.mkdir(fn) - else: - d = os.path.dirname(fn) - if d and not os.path.exists(d): - os.makedirs(d) - - if attr >> 16 == 0xa1ff: - os.symlink(data, fn) - else: - with open(fn, "wb") as f: - f.write(data) - - for info in input_tf_zip.infolist(): - if info.filename.startswith("IMAGES/"): continue - - data = input_tf_zip.read(info.filename) - out_info = copy.copy(info) - - if (info.filename == "META/misc_info.txt" and - OPTIONS.replace_verity_private_key): - ReplaceVerityPrivateKey(input_tf_zip, output_tf_zip, misc_info, OPTIONS.replace_verity_private_key[1]) - elif (info.filename == "BOOT/RAMDISK/verity_key" and - OPTIONS.replace_verity_public_key): - new_data = ReplaceVerityPublicKey(output_tf_zip, OPTIONS.replace_verity_public_key[1]) - write_to_temp(info.filename, info.external_attr, new_data) - elif (info.filename.startswith("BOOT/") or - info.filename.startswith("RECOVERY/") or - info.filename.startswith("META/") or - info.filename == "SYSTEM/etc/recovery-resource.dat"): - write_to_temp(info.filename, info.external_attr, data) - - if info.filename.endswith(".apk"): - name = os.path.basename(info.filename) - key = apk_key_map[name] - if key not in common.SPECIAL_CERT_STRINGS: - print " signing: %-*s (%s)" % (maxsize, name, key) - signed_data = 
SignApk(data, key, key_passwords[key]) - output_tf_zip.writestr(out_info, signed_data) - else: - # an APK we're not supposed to sign. - print "NOT signing: %s" % (name,) - output_tf_zip.writestr(out_info, data) - elif info.filename in ("SYSTEM/build.prop", - "VENDOR/build.prop", - "RECOVERY/RAMDISK/default.prop"): - print "rewriting %s:" % (info.filename,) - new_data = RewriteProps(data, misc_info) - output_tf_zip.writestr(out_info, new_data) - if info.filename == "RECOVERY/RAMDISK/default.prop": - write_to_temp(info.filename, info.external_attr, new_data) - elif info.filename.endswith("mac_permissions.xml"): - print "rewriting %s with new keys." % (info.filename,) - new_data = ReplaceCerts(data) - output_tf_zip.writestr(out_info, new_data) - elif info.filename in ("SYSTEM/recovery-from-boot.p", - "SYSTEM/bin/install-recovery.sh"): - rebuild_recovery = True - elif (OPTIONS.replace_ota_keys and - info.filename in ("RECOVERY/RAMDISK/res/keys", - "SYSTEM/etc/security/otacerts.zip")): - # don't copy these files if we're regenerating them below - pass - elif (OPTIONS.replace_verity_private_key and - info.filename == "META/misc_info.txt"): - pass - elif (OPTIONS.replace_verity_public_key and - info.filename == "BOOT/RAMDISK/verity_key"): - pass - else: - # a non-APK file; copy it verbatim - output_tf_zip.writestr(out_info, data) - - if OPTIONS.replace_ota_keys: - new_recovery_keys = ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info) - if new_recovery_keys: - write_to_temp("RECOVERY/RAMDISK/res/keys", 0755 << 16, new_recovery_keys) - - if rebuild_recovery: - recovery_img = common.GetBootableImage( - "recovery.img", "recovery.img", tmpdir, "RECOVERY", info_dict=misc_info) - boot_img = common.GetBootableImage( - "boot.img", "boot.img", tmpdir, "BOOT", info_dict=misc_info) - - def output_sink(fn, data): - output_tf_zip.writestr("SYSTEM/"+fn, data) - - common.MakeRecoveryPatch(tmpdir, output_sink, recovery_img, boot_img, - info_dict=misc_info) - - shutil.rmtree(tmpdir) - 
- -def ReplaceCerts(data): - """Given a string of data, replace all occurences of a set - of X509 certs with a newer set of X509 certs and return - the updated data string.""" - for old, new in OPTIONS.key_map.iteritems(): - try: - if OPTIONS.verbose: - print " Replacing %s.x509.pem with %s.x509.pem" % (old, new) - f = open(old + ".x509.pem") - old_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower() - f.close() - f = open(new + ".x509.pem") - new_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower() - f.close() - # Only match entire certs. - pattern = "\\b"+old_cert16+"\\b" - (data, num) = re.subn(pattern, new_cert16, data, flags=re.IGNORECASE) - if OPTIONS.verbose: - print " Replaced %d occurence(s) of %s.x509.pem with " \ - "%s.x509.pem" % (num, old, new) - except IOError, e: - if (e.errno == errno.ENOENT and not OPTIONS.verbose): - continue - - print " Error accessing %s. %s. Skip replacing %s.x509.pem " \ - "with %s.x509.pem." % (e.filename, e.strerror, old, new) - - return data - - -def EditTags(tags): - """Given a string containing comma-separated tags, apply the edits - specified in OPTIONS.tag_changes and return the updated string.""" - tags = set(tags.split(",")) - for ch in OPTIONS.tag_changes: - if ch[0] == "-": - tags.discard(ch[1:]) - elif ch[0] == "+": - tags.add(ch[1:]) - return ",".join(sorted(tags)) - - -def RewriteProps(data, misc_info): - output = [] - for line in data.split("\n"): - line = line.strip() - original_line = line - if line and line[0] != '#' and "=" in line: - key, value = line.split("=", 1) - if (key in ("ro.build.fingerprint", "ro.vendor.build.fingerprint") - and misc_info.get("oem_fingerprint_properties") is None): - pieces = value.split("/") - pieces[-1] = EditTags(pieces[-1]) - value = "/".join(pieces) - elif (key in ("ro.build.thumbprint", "ro.vendor.build.thumbprint") - and misc_info.get("oem_fingerprint_properties") is not None): - pieces = value.split("/") - pieces[-1] = EditTags(pieces[-1]) - 
value = "/".join(pieces) - elif key == "ro.build.description": - pieces = value.split(" ") - assert len(pieces) == 5 - pieces[-1] = EditTags(pieces[-1]) - value = " ".join(pieces) - elif key == "ro.build.tags": - value = EditTags(value) - elif key == "ro.build.display.id": - # change, eg, "JWR66N dev-keys" to "JWR66N" - value = value.split() - if len(value) > 1 and value[-1].endswith("-keys"): - value.pop() - value = " ".join(value) - line = key + "=" + value - if line != original_line: - print " replace: ", original_line - print " with: ", line - output.append(line) - return "\n".join(output) + "\n" - - -def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info): - try: - keylist = input_tf_zip.read("META/otakeys.txt").split() - except KeyError: - raise common.ExternalError("can't read META/otakeys.txt from input") - - extra_recovery_keys = misc_info.get("extra_recovery_keys", None) - if extra_recovery_keys: - extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem" - for k in extra_recovery_keys.split()] - if extra_recovery_keys: - print "extra recovery-only key(s): " + ", ".join(extra_recovery_keys) - else: - extra_recovery_keys = [] - - mapped_keys = [] - for k in keylist: - m = re.match(r"^(.*)\.x509\.pem$", k) - if not m: - raise common.ExternalError( - "can't parse \"%s\" from META/otakeys.txt" % (k,)) - k = m.group(1) - mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem") - - if mapped_keys: - print "using:\n ", "\n ".join(mapped_keys) - print "for OTA package verification" - else: - devkey = misc_info.get("default_system_dev_certificate", - "build/target/product/security/testkey") - mapped_keys.append( - OPTIONS.key_map.get(devkey, devkey) + ".x509.pem") - print "META/otakeys.txt has no keys; using", mapped_keys[0] - - # recovery uses a version of the key that has been slightly - # predigested (by DumpPublicKey.java) and put in res/keys. - # extra_recovery_keys are used only in recovery. 
- - p = common.Run(["java", "-jar", - os.path.join(OPTIONS.search_path, "framework", "dumpkey.jar")] - + mapped_keys + extra_recovery_keys, - stdout=subprocess.PIPE) - new_recovery_keys, _ = p.communicate() - if p.returncode != 0: - raise common.ExternalError("failed to run dumpkeys") - common.ZipWriteStr(output_tf_zip, "RECOVERY/RAMDISK/res/keys", - new_recovery_keys) - - # SystemUpdateActivity uses the x509.pem version of the keys, but - # put into a zipfile system/etc/security/otacerts.zip. - # We DO NOT include the extra_recovery_keys (if any) here. - - tempfile = cStringIO.StringIO() - certs_zip = zipfile.ZipFile(tempfile, "w") - for k in mapped_keys: - certs_zip.write(k) - certs_zip.close() - common.ZipWriteStr(output_tf_zip, "SYSTEM/etc/security/otacerts.zip", - tempfile.getvalue()) - - return new_recovery_keys - -def ReplaceVerityPublicKey(targetfile_zip, key_path): - print "Replacing verity public key with %s" % key_path - with open(key_path) as f: - data = f.read() - common.ZipWriteStr(targetfile_zip, "BOOT/RAMDISK/verity_key", data) - return data - -def ReplaceVerityPrivateKey(targetfile_input_zip, targetfile_output_zip, misc_info, key_path): - print "Replacing verity private key with %s" % key_path - current_key = misc_info["verity_key"] - original_misc_info = targetfile_input_zip.read("META/misc_info.txt") - new_misc_info = original_misc_info.replace(current_key, key_path) - common.ZipWriteStr(targetfile_output_zip, "META/misc_info.txt", new_misc_info) - misc_info["verity_key"] = key_path - -def BuildKeyMap(misc_info, key_mapping_options): - for s, d in key_mapping_options: - if s is None: # -d option - devkey = misc_info.get("default_system_dev_certificate", - "build/target/product/security/testkey") - devkeydir = os.path.dirname(devkey) - - OPTIONS.key_map.update({ - devkeydir + "/testkey": d + "/releasekey", - devkeydir + "/devkey": d + "/releasekey", - devkeydir + "/media": d + "/media", - devkeydir + "/shared": d + "/shared", - devkeydir + 
"/platform": d + "/platform", - }) - else: - OPTIONS.key_map[s] = d - - -def main(argv): - - key_mapping_options = [] - - def option_handler(o, a): - if o in ("-e", "--extra_apks"): - names, key = a.split("=") - names = names.split(",") - for n in names: - OPTIONS.extra_apks[n] = key - elif o in ("-d", "--default_key_mappings"): - key_mapping_options.append((None, a)) - elif o in ("-k", "--key_mapping"): - key_mapping_options.append(a.split("=", 1)) - elif o in ("-o", "--replace_ota_keys"): - OPTIONS.replace_ota_keys = True - elif o in ("-t", "--tag_changes"): - new = [] - for i in a.split(","): - i = i.strip() - if not i or i[0] not in "-+": - raise ValueError("Bad tag change '%s'" % (i,)) - new.append(i[0] + i[1:].strip()) - OPTIONS.tag_changes = tuple(new) - elif o == "--replace_verity_public_key": - OPTIONS.replace_verity_public_key = (True, a) - elif o == "--replace_verity_private_key": - OPTIONS.replace_verity_private_key = (True, a) - else: - return False - return True - - args = common.ParseOptions(argv, __doc__, - extra_opts="e:d:k:ot:", - extra_long_opts=["extra_apks=", - "default_key_mappings=", - "key_mapping=", - "replace_ota_keys", - "tag_changes=", - "replace_verity_public_key=", - "replace_verity_private_key="], - extra_option_handler=option_handler) - - if len(args) != 2: - common.Usage(__doc__) - sys.exit(1) - - input_zip = zipfile.ZipFile(args[0], "r") - output_zip = zipfile.ZipFile(args[1], "w") - - misc_info = common.LoadInfoDict(input_zip) - - BuildKeyMap(misc_info, key_mapping_options) - - apk_key_map = GetApkCerts(input_zip) - CheckAllApksSigned(input_zip, apk_key_map) - - key_passwords = common.GetKeyPasswords(set(apk_key_map.values())) - ProcessTargetFiles(input_zip, output_zip, misc_info, - apk_key_map, key_passwords) - - input_zip.close() - output_zip.close() - - add_img_to_target_files.AddImagesToTargetFiles(args[1]) - - print "done." 
- - -if __name__ == '__main__': - try: - main(sys.argv[1:]) - except common.ExternalError, e: - print - print " ERROR: %s" % (e,) - print - sys.exit(1) diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks new file mode 120000 index 0000000000..b5ec59a253 --- /dev/null +++ b/tools/releasetools/sign_target_files_apks @@ -0,0 +1 @@ +sign_target_files_apks.py \ No newline at end of file diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py new file mode 100755 index 0000000000..d47cc4ff94 --- /dev/null +++ b/tools/releasetools/sign_target_files_apks.py @@ -0,0 +1,506 @@ +#!/usr/bin/env python +# +# Copyright (C) 2008 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Signs all the APK files in a target-files zipfile, producing a new +target-files zip. + +Usage: sign_target_files_apks [flags] input_target_files output_target_files + + -e (--extra_apks) + Add extra APK name/key pairs as though they appeared in + apkcerts.txt (so mappings specified by -k and -d are applied). + Keys specified in -e override any value for that app contained + in the apkcerts.txt file. Option may be repeated to give + multiple extra packages. + + -k (--key_mapping) + Add a mapping from the key name as specified in apkcerts.txt (the + src_key) to the real key you wish to sign the package with + (dest_key). Option may be repeated to give multiple key + mappings. 
+ + -d (--default_key_mappings) + Set up the following key mappings: + + $devkey/devkey ==> $dir/releasekey + $devkey/testkey ==> $dir/releasekey + $devkey/media ==> $dir/media + $devkey/shared ==> $dir/shared + $devkey/platform ==> $dir/platform + + where $devkey is the directory part of the value of + default_system_dev_certificate from the input target-files's + META/misc_info.txt. (Defaulting to "build/target/product/security" + if the value is not present in misc_info. + + -d and -k options are added to the set of mappings in the order + in which they appear on the command line. + + -o (--replace_ota_keys) + Replace the certificate (public key) used by OTA package + verification with the one specified in the input target_files + zip (in the META/otakeys.txt file). Key remapping (-k and -d) + is performed on this key. + + -t (--tag_changes) <+tag>,<-tag>,... + Comma-separated list of changes to make to the set of tags (in + the last component of the build fingerprint). Prefix each with + '+' or '-' to indicate whether that tag should be added or + removed. Changes are processed in the order they appear. + Default value is "-test-keys,-dev-keys,+release-keys". + +""" + +import sys + +if sys.hexversion < 0x02070000: + print >> sys.stderr, "Python 2.7 or newer is required." 
import base64
import cStringIO
import copy
import errno
import os
import re
import shutil
import subprocess
import tempfile
import zipfile

import add_img_to_target_files
import common

OPTIONS = common.OPTIONS

# Defaults for the command-line options; option_handler() in main()
# overwrites these from the flags.
OPTIONS.extra_apks = {}
OPTIONS.key_map = {}
OPTIONS.replace_ota_keys = False
OPTIONS.replace_verity_public_key = False
OPTIONS.replace_verity_private_key = False
OPTIONS.tag_changes = ("-test-keys", "-dev-keys", "+release-keys")

def GetApkCerts(tf_zip):
  """Return a dict mapping APK basename -> signing key name.

  Starts from META/apkcerts.txt in the target-files zip, then applies
  the -k/-d key remappings and finally the -e per-APK overrides.
  """
  certmap = common.ReadApkCerts(tf_zip)

  # apply the key remapping to the contents of the file
  for apk, cert in certmap.iteritems():
    certmap[apk] = OPTIONS.key_map.get(cert, cert)

  # apply all the -e options, overriding anything in the file.
  # An empty key value means "do not sign this APK".
  for apk, cert in OPTIONS.extra_apks.iteritems():
    if not cert:
      cert = "PRESIGNED"
    certmap[apk] = OPTIONS.key_map.get(cert, cert)

  return certmap


def CheckAllApksSigned(input_tf_zip, apk_key_map):
  """Check that all the APKs we want to sign have keys specified, and
  error out if they don't."""
  unknown_apks = []
  for info in input_tf_zip.infolist():
    if info.filename.endswith(".apk"):
      name = os.path.basename(info.filename)
      if name not in apk_key_map:
        unknown_apks.append(name)
  if unknown_apks:
    # Refuse to proceed: silently copying an unsigned APK through would
    # produce a broken build.
    print "ERROR: no key specified for:\n\n ",
    print "\n ".join(unknown_apks)
    print "\nUse '-e =' to specify a key (which may be an"
    print "empty string to not sign this apk)."
    sys.exit(1)


def SignApk(data, keyname, pw):
  """Sign the APK contained in the string `data` with key `keyname`
  (password `pw`) via common.SignFile; return the signed bytes.

  Uses NamedTemporaryFiles because the signer works on files, not
  in-memory data.
  """
  unsigned = tempfile.NamedTemporaryFile()
  unsigned.write(data)
  unsigned.flush()

  signed = tempfile.NamedTemporaryFile()

  # align=4 zip-aligns the output so it can be mmapped by the platform.
  common.SignFile(unsigned.name, signed.name, keyname, pw, align=4)

  data = signed.read()
  unsigned.close()
  signed.close()

  return data


def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
                       apk_key_map, key_passwords):
  """Copy input_tf_zip to output_tf_zip entry by entry, re-signing APKs
  and rewriting key- and fingerprint-bearing files along the way.

  Also mirrors BOOT/, RECOVERY/, META/ and the recovery resources into a
  temp dir so the recovery patch can be rebuilt from the re-signed
  content when needed.
  """

  # Width of the longest APK basename, for aligned progress output.
  maxsize = max([len(os.path.basename(i.filename))
                 for i in input_tf_zip.infolist()
                 if i.filename.endswith('.apk')])
  rebuild_recovery = False

  tmpdir = tempfile.mkdtemp()
  def write_to_temp(fn, attr, data):
    # Materialize one zip entry under tmpdir, preserving directories and
    # symlinks (external_attr high bits 0xa1ff == symlink mode).
    fn = os.path.join(tmpdir, fn)
    if fn.endswith("/"):
      fn = os.path.join(tmpdir, fn)
      os.mkdir(fn)
    else:
      d = os.path.dirname(fn)
      if d and not os.path.exists(d):
        os.makedirs(d)

    if attr >> 16 == 0xa1ff:
      os.symlink(data, fn)
    else:
      with open(fn, "wb") as f:
        f.write(data)

  for info in input_tf_zip.infolist():
    if info.filename.startswith("IMAGES/"):
      # Images are regenerated from the signed content afterwards.
      continue

    data = input_tf_zip.read(info.filename)
    out_info = copy.copy(info)

    # First cascade: verity key replacement and temp-dir mirroring.
    if (info.filename == "META/misc_info.txt" and
        OPTIONS.replace_verity_private_key):
      ReplaceVerityPrivateKey(input_tf_zip, output_tf_zip, misc_info,
                              OPTIONS.replace_verity_private_key[1])
    elif (info.filename == "BOOT/RAMDISK/verity_key" and
          OPTIONS.replace_verity_public_key):
      new_data = ReplaceVerityPublicKey(output_tf_zip,
                                        OPTIONS.replace_verity_public_key[1])
      write_to_temp(info.filename, info.external_attr, new_data)
    elif (info.filename.startswith("BOOT/") or
          info.filename.startswith("RECOVERY/") or
          info.filename.startswith("META/") or
          info.filename == "SYSTEM/etc/recovery-resource.dat"):
      write_to_temp(info.filename, info.external_attr, data)

    # Second cascade: what to write into the output zip for this entry.
    if info.filename.endswith(".apk"):
      name = os.path.basename(info.filename)
      key = apk_key_map[name]
      if key not in common.SPECIAL_CERT_STRINGS:
        print " signing: %-*s (%s)" % (maxsize, name, key)
        signed_data = SignApk(data, key, key_passwords[key])
        output_tf_zip.writestr(out_info, signed_data)
      else:
        # an APK we're not supposed to sign.
        print "NOT signing: %s" % (name,)
        output_tf_zip.writestr(out_info, data)
    elif info.filename in ("SYSTEM/build.prop",
                           "VENDOR/build.prop",
                           "RECOVERY/RAMDISK/default.prop"):
      print "rewriting %s:" % (info.filename,)
      new_data = RewriteProps(data, misc_info)
      output_tf_zip.writestr(out_info, new_data)
      if info.filename == "RECOVERY/RAMDISK/default.prop":
        # The rewritten prop must also be in the temp mirror so the
        # recovery rebuild below sees the final content.
        write_to_temp(info.filename, info.external_attr, new_data)
    elif info.filename.endswith("mac_permissions.xml"):
      print "rewriting %s with new keys." % (info.filename,)
      new_data = ReplaceCerts(data)
      output_tf_zip.writestr(out_info, new_data)
    elif info.filename in ("SYSTEM/recovery-from-boot.p",
                           "SYSTEM/bin/install-recovery.sh"):
      # These are regenerated (see rebuild_recovery below), not copied.
      rebuild_recovery = True
    elif (OPTIONS.replace_ota_keys and
          info.filename in ("RECOVERY/RAMDISK/res/keys",
                            "SYSTEM/etc/security/otacerts.zip")):
      # don't copy these files if we're regenerating them below
      pass
    elif (OPTIONS.replace_verity_private_key and
          info.filename == "META/misc_info.txt"):
      pass
    elif (OPTIONS.replace_verity_public_key and
          info.filename == "BOOT/RAMDISK/verity_key"):
      pass
    else:
      # a non-APK file; copy it verbatim
      output_tf_zip.writestr(out_info, data)

  if OPTIONS.replace_ota_keys:
    new_recovery_keys = ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info)
    if new_recovery_keys:
      # 0755 << 16: store the file mode in external_attr for the mirror.
      write_to_temp("RECOVERY/RAMDISK/res/keys", 0o755 << 16, new_recovery_keys)

  if rebuild_recovery:
    recovery_img = common.GetBootableImage(
        "recovery.img", "recovery.img", tmpdir, "RECOVERY", info_dict=misc_info)
    boot_img = common.GetBootableImage(
        "boot.img", "boot.img", tmpdir, "BOOT", info_dict=misc_info)

    def output_sink(fn, data):
      output_tf_zip.writestr("SYSTEM/"+fn, data)

    common.MakeRecoveryPatch(tmpdir, output_sink, recovery_img, boot_img,
                             info_dict=misc_info)

  shutil.rmtree(tmpdir)
shutil.rmtree(tmpdir) + + +def ReplaceCerts(data): + """Given a string of data, replace all occurences of a set + of X509 certs with a newer set of X509 certs and return + the updated data string.""" + for old, new in OPTIONS.key_map.iteritems(): + try: + if OPTIONS.verbose: + print " Replacing %s.x509.pem with %s.x509.pem" % (old, new) + f = open(old + ".x509.pem") + old_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower() + f.close() + f = open(new + ".x509.pem") + new_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower() + f.close() + # Only match entire certs. + pattern = "\\b"+old_cert16+"\\b" + (data, num) = re.subn(pattern, new_cert16, data, flags=re.IGNORECASE) + if OPTIONS.verbose: + print " Replaced %d occurence(s) of %s.x509.pem with " \ + "%s.x509.pem" % (num, old, new) + except IOError as e: + if e.errno == errno.ENOENT and not OPTIONS.verbose: + continue + + print " Error accessing %s. %s. Skip replacing %s.x509.pem " \ + "with %s.x509.pem." % (e.filename, e.strerror, old, new) + + return data + + +def EditTags(tags): + """Given a string containing comma-separated tags, apply the edits + specified in OPTIONS.tag_changes and return the updated string.""" + tags = set(tags.split(",")) + for ch in OPTIONS.tag_changes: + if ch[0] == "-": + tags.discard(ch[1:]) + elif ch[0] == "+": + tags.add(ch[1:]) + return ",".join(sorted(tags)) + + +def RewriteProps(data, misc_info): + output = [] + for line in data.split("\n"): + line = line.strip() + original_line = line + if line and line[0] != '#' and "=" in line: + key, value = line.split("=", 1) + if (key in ("ro.build.fingerprint", "ro.vendor.build.fingerprint") + and misc_info.get("oem_fingerprint_properties") is None): + pieces = value.split("/") + pieces[-1] = EditTags(pieces[-1]) + value = "/".join(pieces) + elif (key in ("ro.build.thumbprint", "ro.vendor.build.thumbprint") + and misc_info.get("oem_fingerprint_properties") is not None): + pieces = value.split("/") + pieces[-1] 
= EditTags(pieces[-1]) + value = "/".join(pieces) + elif key == "ro.build.description": + pieces = value.split(" ") + assert len(pieces) == 5 + pieces[-1] = EditTags(pieces[-1]) + value = " ".join(pieces) + elif key == "ro.build.tags": + value = EditTags(value) + elif key == "ro.build.display.id": + # change, eg, "JWR66N dev-keys" to "JWR66N" + value = value.split() + if len(value) > 1 and value[-1].endswith("-keys"): + value.pop() + value = " ".join(value) + line = key + "=" + value + if line != original_line: + print " replace: ", original_line + print " with: ", line + output.append(line) + return "\n".join(output) + "\n" + + +def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info): + try: + keylist = input_tf_zip.read("META/otakeys.txt").split() + except KeyError: + raise common.ExternalError("can't read META/otakeys.txt from input") + + extra_recovery_keys = misc_info.get("extra_recovery_keys", None) + if extra_recovery_keys: + extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem" + for k in extra_recovery_keys.split()] + if extra_recovery_keys: + print "extra recovery-only key(s): " + ", ".join(extra_recovery_keys) + else: + extra_recovery_keys = [] + + mapped_keys = [] + for k in keylist: + m = re.match(r"^(.*)\.x509\.pem$", k) + if not m: + raise common.ExternalError( + "can't parse \"%s\" from META/otakeys.txt" % (k,)) + k = m.group(1) + mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem") + + if mapped_keys: + print "using:\n ", "\n ".join(mapped_keys) + print "for OTA package verification" + else: + devkey = misc_info.get("default_system_dev_certificate", + "build/target/product/security/testkey") + mapped_keys.append( + OPTIONS.key_map.get(devkey, devkey) + ".x509.pem") + print "META/otakeys.txt has no keys; using", mapped_keys[0] + + # recovery uses a version of the key that has been slightly + # predigested (by DumpPublicKey.java) and put in res/keys. + # extra_recovery_keys are used only in recovery. 
def ReplaceVerityPublicKey(targetfile_zip, key_path):
  """Write the verity public key at `key_path` into the output zip as
  BOOT/RAMDISK/verity_key; return the key bytes so callers can mirror
  them elsewhere."""
  print "Replacing verity public key with %s" % key_path
  with open(key_path) as f:
    data = f.read()
  common.ZipWriteStr(targetfile_zip, "BOOT/RAMDISK/verity_key", data)
  return data

def ReplaceVerityPrivateKey(targetfile_input_zip, targetfile_output_zip,
                            misc_info, key_path):
  """Rewrite META/misc_info.txt in the output zip so verity_key points
  at `key_path` (plain text substitution of the old key path), and
  update misc_info in place to match."""
  print "Replacing verity private key with %s" % key_path
  current_key = misc_info["verity_key"]
  original_misc_info = targetfile_input_zip.read("META/misc_info.txt")
  new_misc_info = original_misc_info.replace(current_key, key_path)
  common.ZipWriteStr(targetfile_output_zip, "META/misc_info.txt", new_misc_info)
  misc_info["verity_key"] = key_path

def BuildKeyMap(misc_info, key_mapping_options):
  """Populate OPTIONS.key_map from the -d/-k options (in command-line
  order).  A (None, dir) pair is a -d option and expands to the five
  standard dev-key -> release-key mappings."""
  for s, d in key_mapping_options:
    if s is None:  # -d option
      devkey = misc_info.get("default_system_dev_certificate",
                             "build/target/product/security/testkey")
      devkeydir = os.path.dirname(devkey)

      OPTIONS.key_map.update({
          devkeydir + "/testkey": d + "/releasekey",
          devkeydir + "/devkey": d + "/releasekey",
          devkeydir + "/media": d + "/media",
          devkeydir + "/shared": d + "/shared",
          devkeydir + "/platform": d + "/platform",
          })
    else:
      # -k option: explicit src=dst mapping.
      OPTIONS.key_map[s] = d


def main(argv):
  """Parse flags, then sign the input target-files zip (argv[0] after
  flag parsing) into the output zip (argv[1]) and regenerate images."""

  # Collected (-d, -k) options, applied in order by BuildKeyMap below.
  key_mapping_options = []

  def option_handler(o, a):
    if o in ("-e", "--extra_apks"):
      names, key = a.split("=")
      names = names.split(",")
      for n in names:
        OPTIONS.extra_apks[n] = key
    elif o in ("-d", "--default_key_mappings"):
      key_mapping_options.append((None, a))
    elif o in ("-k", "--key_mapping"):
      key_mapping_options.append(a.split("=", 1))
    elif o in ("-o", "--replace_ota_keys"):
      OPTIONS.replace_ota_keys = True
    elif o in ("-t", "--tag_changes"):
      new = []
      for i in a.split(","):
        i = i.strip()
        if not i or i[0] not in "-+":
          raise ValueError("Bad tag change '%s'" % (i,))
        new.append(i[0] + i[1:].strip())
      OPTIONS.tag_changes = tuple(new)
    elif o == "--replace_verity_public_key":
      OPTIONS.replace_verity_public_key = (True, a)
    elif o == "--replace_verity_private_key":
      OPTIONS.replace_verity_private_key = (True, a)
    else:
      return False
    return True

  args = common.ParseOptions(argv, __doc__,
                             extra_opts="e:d:k:ot:",
                             extra_long_opts=["extra_apks=",
                                              "default_key_mappings=",
                                              "key_mapping=",
                                              "replace_ota_keys",
                                              "tag_changes=",
                                              "replace_verity_public_key=",
                                              "replace_verity_private_key="],
                             extra_option_handler=option_handler)

  if len(args) != 2:
    common.Usage(__doc__)
    sys.exit(1)

  input_zip = zipfile.ZipFile(args[0], "r")
  output_zip = zipfile.ZipFile(args[1], "w")

  misc_info = common.LoadInfoDict(input_zip)

  BuildKeyMap(misc_info, key_mapping_options)

  apk_key_map = GetApkCerts(input_zip)
  # Fail early if any APK lacks a key, before doing any signing work.
  CheckAllApksSigned(input_zip, apk_key_map)

  key_passwords = common.GetKeyPasswords(set(apk_key_map.values()))
  ProcessTargetFiles(input_zip, output_zip, misc_info,
                     apk_key_map, key_passwords)

  input_zip.close()
  output_zip.close()

  # Rebuild the IMAGES/ entries from the freshly signed content.
  add_img_to_target_files.AddImagesToTargetFiles(args[1])

  print "done."
+ + +if __name__ == '__main__': + try: + main(sys.argv[1:]) + except common.ExternalError, e: + print + print " ERROR: %s" % (e,) + print + sys.exit(1) diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py index 7574747f5d..b97bb8493c 100644 --- a/tools/releasetools/sparse_img.py +++ b/tools/releasetools/sparse_img.py @@ -14,12 +14,11 @@ import bisect import os -import sys import struct -import pprint from hashlib import sha1 -from rangelib import * +import rangelib + class SparseImage(object): """Wraps a sparse image file (and optional file map) into an image @@ -39,7 +38,6 @@ class SparseImage(object): self.blocksize = blk_sz = header[5] self.total_blocks = total_blks = header[6] total_chunks = header[7] - image_checksum = header[8] if magic != 0xED26FF3A: raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,)) @@ -64,7 +62,6 @@ class SparseImage(object): header_bin = f.read(12) header = struct.unpack("<2H2I", header_bin) chunk_type = header[0] - reserved1 = header[1] chunk_sz = header[2] total_sz = header[3] data_sz = total_sz - 12 @@ -102,7 +99,7 @@ class SparseImage(object): raise ValueError("Unknown chunk type 0x%04X not supported" % (chunk_type,)) - self.care_map = RangeSet(care_data) + self.care_map = rangelib.RangeSet(care_data) self.offset_index = [i[0] for i in offset_map] if file_map_fn: @@ -166,7 +163,7 @@ class SparseImage(object): with open(fn) as f: for line in f: fn, ranges = line.split(None, 1) - ranges = RangeSet.parse(ranges) + ranges = rangelib.RangeSet.parse(ranges) out[fn] = ranges assert ranges.size() == ranges.intersect(remaining).size() remaining = remaining.subtract(ranges) @@ -186,7 +183,7 @@ class SparseImage(object): for s, e in remaining: for b in range(s, e): idx = bisect.bisect_right(self.offset_index, b) - 1 - chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx] + chunk_start, _, filepos, fill_data = self.offset_map[idx] if filepos is not None: filepos += (b-chunk_start) * 
self.blocksize f.seek(filepos, os.SEEK_SET) @@ -204,8 +201,8 @@ class SparseImage(object): nonzero_blocks.append(b) nonzero_blocks.append(b+1) - out["__ZERO"] = RangeSet(data=zero_blocks) - out["__NONZERO"] = RangeSet(data=nonzero_blocks) + out["__ZERO"] = rangelib.RangeSet(data=zero_blocks) + out["__NONZERO"] = rangelib.RangeSet(data=nonzero_blocks) def ResetFileMap(self): """Throw away the file map and treat the entire image as