# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import base64
import collections
import copy
import errno
import fnmatch
import getopt
import getpass
import gzip
import imp
import json
import logging
import logging.config
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import zipfile
from hashlib import sha1, sha256

import blockimgdiff
import sparse_img

logger = logging.getLogger(__name__)

class Options(object):
  def __init__(self):
    base_out_path = os.getenv('OUT_DIR_COMMON_BASE')
    if base_out_path is None:
      base_search_path = "out"
    else:
      base_search_path = os.path.join(base_out_path,
                                      os.path.basename(os.getcwd()))

    # Python >= 3.3 returns 'linux', whereas Python 2.7 gives 'linux2'.
    platform_search_path = {
        "linux": os.path.join(base_search_path, "host/linux-x86"),
        "linux2": os.path.join(base_search_path, "host/linux-x86"),
        "darwin": os.path.join(base_search_path, "host/darwin-x86"),
    }

    self.search_path = platform_search_path.get(sys.platform)
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx2048m"]  # The default JVM args.
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # Use the otatools-built boot_signer by default.
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8


OPTIONS = Options()


# The block size that's used across the releasetools scripts.
BLOCK_SIZE = 4096

# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")

# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
# descriptor into vbmeta.img.
AVB_PARTITIONS = ('boot', 'dtbo', 'odm', 'product', 'recovery', 'system',
                  'system_ext', 'vendor')

# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')

# Partitions that should have their care_map added to META/care_map.pb
PARTITIONS_WITH_CARE_MAP = ('system', 'vendor', 'product', 'system_ext', 'odm')

class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (e.g. low battery, package verification failure).
  Detailed codes are in 'bootable/recovery/error_code.h'."""

  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008


class ExternalError(RuntimeError):
  pass

def InitLogging():
  DEFAULT_LOGGING_CONFIG = {
      'version': 1,
      'disable_existing_loggers': False,
      'formatters': {
          'standard': {
              'format':
                  '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
              'datefmt': '%Y-%m-%d %H:%M:%S',
          },
      },
      'handlers': {
          'default': {
              'class': 'logging.StreamHandler',
              'formatter': 'standard',
          },
      },
      'loggers': {
          '': {
              'handlers': ['default'],
              'level': 'WARNING',
              'propagate': True,
          }
      }
  }
  env_config = os.getenv('LOGGING_CONFIG')
  if env_config:
    with open(env_config) as f:
      config = json.load(f)
  else:
    config = DEFAULT_LOGGING_CONFIG

    # Increase the logging level for verbose mode.
    if OPTIONS.verbose:
      config = copy.deepcopy(DEFAULT_LOGGING_CONFIG)
      config['loggers']['']['level'] = 'INFO'

  logging.config.dictConfig(config)

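
# Illustrative sketch: the default config above can be overridden by pointing
# the LOGGING_CONFIG environment variable at a JSON file holding a
# dictConfig-style dictionary. The path and invocation below are hypothetical:
#
#   LOGGING_CONFIG=/tmp/logging.json ./ota_from_target_files ...
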

def Run(args, verbose=None, **kwargs):
  """Creates and returns a subprocess.Popen object.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as
        env, stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless caller specifies any of them.
        universal_newlines will default to True, as most of the users in
        releasetools expect string output.

  Returns:
    A subprocess.Popen object.
  """
  if 'stdout' not in kwargs and 'stderr' not in kwargs:
    kwargs['stdout'] = subprocess.PIPE
    kwargs['stderr'] = subprocess.STDOUT
  if 'universal_newlines' not in kwargs:
    kwargs['universal_newlines'] = True
  # Don't log anything if the caller explicitly says so.
  if verbose is not False:
    logger.info("  Running: \"%s\"", " ".join(args))
  return subprocess.Popen(args, **kwargs)


def RunAndWait(args, verbose=None, **kwargs):
  """Runs the given command, waiting for it to complete.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as
        env, stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless caller specifies any of them.

  Raises:
    ExternalError: On non-zero exit from the command.
  """
  proc = Run(args, verbose=verbose, **kwargs)
  proc.wait()

  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run command '{}' (exit code {})".format(
            args, proc.returncode))


def RunAndCheckOutput(args, verbose=None, **kwargs):
  """Runs the given command and returns the output.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as
        env, stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless caller specifies any of them.

  Returns:
    The output string.

  Raises:
    ExternalError: On non-zero exit from the command.
  """
  proc = Run(args, verbose=verbose, **kwargs)
  output, _ = proc.communicate()
  # Don't log anything if the caller explicitly says so.
  if verbose is not False:
    logger.info("%s", output.rstrip())
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run command '{}' (exit code {}):\n{}".format(
            args, proc.returncode, output))
  return output

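
# A minimal usage sketch of the three helpers above (commands and the captured
# output are illustrative, not from the original file):
#
#   proc = Run(["unzip", "-l", "target_files.zip"])  # non-blocking Popen
#   RunAndWait(["mkdir", "-p", "/tmp/scratch"])      # raises ExternalError on
#                                                    # non-zero exit
#   out = RunAndCheckOutput(["echo", "hello"])       # out == "hello\n"
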

def RoundUpTo4K(value):
  rounded_up = value + 4095
  return rounded_up - (rounded_up % 4096)

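
# For example, RoundUpTo4K(1) == 4096, RoundUpTo4K(4096) == 4096, and
# RoundUpTo4K(4097) == 8192.
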

def CloseInheritedPipes():
  """Gmake on MAC OS has a file descriptor (PIPE) leak. We close those fds
  before doing other work."""
  if platform.system() != "Darwin":
    return
  for d in range(3, 1025):
    try:
      stat = os.fstat(d)
      if stat is not None:
        pipebit = stat[0] & 0x1000
        if pipebit != 0:
          os.close(d)
    except OSError:
      pass


def LoadInfoDict(input_file, repacking=False):
  """Loads the key/value pairs from the given input target_files.

  It reads the `META/misc_info.txt` file in the target_files input, does
  sanity checks and returns the parsed key/value pairs for the given build.
  It's usually called early when working on input target_files files, e.g.
  when generating OTAs, or signing builds. Note that the function may be
  called against an old target_files file (i.e. from past dessert releases).
  So the property parsing needs to be backward compatible.

  In a `META/misc_info.txt`, a few properties are stored as links to the files
  in the PRODUCT_OUT directory. It works fine with the build system. However,
  they are no longer available when (re)generating images from a target_files
  zip. When `repacking` is True, redirect these properties to the actual files
  in the unzipped directory.

  Args:
    input_file: The input target_files file, which could be an open
        zipfile.ZipFile instance, or a str for the dir that contains the files
        unzipped from a target_files file.
    repacking: Whether it's trying to repack a target_files file after loading
        the info dict (default: False). If so, it will rewrite a few loaded
        properties (e.g. selinux_fc, root_dir) to point to the actual files in
        the target_files file. When doing repacking, `input_file` must be a
        dir.

  Returns:
    A dict that contains the parsed key/value pairs.

  Raises:
    AssertionError: On invalid input arguments.
    ValueError: On malformed input values.
  """
  if repacking:
    assert isinstance(input_file, str), \
        "input_file must be a path str when doing repacking"

  def read_helper(fn):
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn).decode()
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)

  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    raise ValueError("Failed to find META/misc_info.txt in input target-files")

  if "recovery_api_version" not in d:
    raise ValueError("Failed to find 'recovery_api_version'")
  if "fstab_version" not in d:
    raise ValueError("Failed to find 'fstab_version'")

  if repacking:
    # "selinux_fc" properties should point to the file_contexts files
    # (file_contexts.bin) under META/.
    for key in d:
      if key.endswith("selinux_fc"):
        fc_basename = os.path.basename(d[key])
        fc_config = os.path.join(input_file, "META", fc_basename)
        assert os.path.exists(fc_config)

        d[key] = fc_config

    # Similarly we need to redirect "root_dir", and "root_fs_config".
    d["root_dir"] = os.path.join(input_file, "ROOT")
    d["root_fs_config"] = os.path.join(
        input_file, "META", "root_filesystem_config.txt")

    # Redirect {system,vendor}_base_fs_file.
    if "system_base_fs_file" in d:
      basename = os.path.basename(d["system_base_fs_file"])
      system_base_fs_file = os.path.join(input_file, "META", basename)
      if os.path.exists(system_base_fs_file):
        d["system_base_fs_file"] = system_base_fs_file
      else:
        logger.warning(
            "Failed to find system base fs file: %s", system_base_fs_file)
        del d["system_base_fs_file"]

    if "vendor_base_fs_file" in d:
      basename = os.path.basename(d["vendor_base_fs_file"])
      vendor_base_fs_file = os.path.join(input_file, "META", basename)
      if os.path.exists(vendor_base_fs_file):
        d["vendor_base_fs_file"] = vendor_base_fs_file
      else:
        logger.warning(
            "Failed to find vendor base fs file: %s", vendor_base_fs_file)
        del d["vendor_base_fs_file"]

  def makeint(key):
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  # We changed recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab to
  # ../RAMDISK/system/etc/recovery.fstab. LoadInfoDict() has to handle both
  # cases, since it may load the info_dict from an old build (e.g. when
  # generating incremental OTAs from that build).
  system_root_image = d.get("system_root_image") == "true"
  if d.get("no_recovery") != "true":
    recovery_fstab_path = "RECOVERY/RAMDISK/system/etc/recovery.fstab"
    if isinstance(input_file, zipfile.ZipFile):
      if recovery_fstab_path not in input_file.namelist():
        recovery_fstab_path = "RECOVERY/RAMDISK/etc/recovery.fstab"
    else:
      path = os.path.join(input_file, *recovery_fstab_path.split("/"))
      if not os.path.exists(path):
        recovery_fstab_path = "RECOVERY/RAMDISK/etc/recovery.fstab"
    d["fstab"] = LoadRecoveryFSTab(
        read_helper, d["fstab_version"], recovery_fstab_path,
        system_root_image)

  elif d.get("recovery_as_boot") == "true":
    recovery_fstab_path = "BOOT/RAMDISK/system/etc/recovery.fstab"
    if isinstance(input_file, zipfile.ZipFile):
      if recovery_fstab_path not in input_file.namelist():
        recovery_fstab_path = "BOOT/RAMDISK/etc/recovery.fstab"
    else:
      path = os.path.join(input_file, *recovery_fstab_path.split("/"))
      if not os.path.exists(path):
        recovery_fstab_path = "BOOT/RAMDISK/etc/recovery.fstab"
    d["fstab"] = LoadRecoveryFSTab(
        read_helper, d["fstab_version"], recovery_fstab_path,
        system_root_image)

  else:
    d["fstab"] = None

  # Tries to load the build props for all partitions with care_map, including
  # system and vendor.
  for partition in PARTITIONS_WITH_CARE_MAP:
    partition_prop = "{}.build.prop".format(partition)
    d[partition_prop] = LoadBuildProp(
        read_helper, "{}/build.prop".format(partition.upper()))
    # Some partition might use /<partition>/etc/build.prop as the new path.
    # TODO: try new path first when majority of them switch to the new path.
    if not d[partition_prop]:
      d[partition_prop] = LoadBuildProp(
          read_helper, "{}/etc/build.prop".format(partition.upper()))
  d["build.prop"] = d["system.build.prop"]

  # Set up the salt (based on fingerprint or thumbprint) that will be used
  # when adding the AVB footer.
  if d.get("avb_enable") == "true":
    fp = None
    if "build.prop" in d:
      build_prop = d["build.prop"]
      if "ro.build.fingerprint" in build_prop:
        fp = build_prop["ro.build.fingerprint"]
      elif "ro.build.thumbprint" in build_prop:
        fp = build_prop["ro.build.thumbprint"]
    if fp:
      d["avb_salt"] = sha256(fp).hexdigest()

  return d

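
# Illustrative usage (path and values hypothetical): given an unzipped
# target_files dir whose META/misc_info.txt contains lines such as
#
#   recovery_api_version=3
#   fstab_version=2
#   blocksize=131072
#
# the call below returns those pairs (integer-valued ones converted), plus the
# parsed fstab and per-partition build props:
#
#   OPTIONS.info_dict = LoadInfoDict("/path/to/target_files_dir",
#                                    repacking=True)
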

def LoadBuildProp(read_helper, prop_file):
  try:
    data = read_helper(prop_file)
  except KeyError:
    logger.warning("Failed to read %s", prop_file)
    data = ""
  return LoadDictionaryFromLines(data.split("\n"))


def LoadDictionaryFromFile(file_path):
  with open(file_path) as f:
    lines = list(f.read().splitlines())

  return LoadDictionaryFromLines(lines)


def LoadDictionaryFromLines(lines):
  d = {}
  for line in lines:
    line = line.strip()
    if not line or line.startswith("#"):
      continue
    if "=" in line:
      name, value = line.split("=", 1)
      d[name] = value
  return d

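
# For example, LoadDictionaryFromLines(["# comment", "", "a=1", "b=2=3"])
# returns {'a': '1', 'b': '2=3'}: blank lines and '#' comments are skipped,
# and only the first '=' splits a line.
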

def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
                      system_root_image=False):
  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, context):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.context = context

  try:
    data = read_helper(recovery_fstab_path)
  except KeyError:
    logger.warning("Failed to find %s", recovery_fstab_path)
    data = ""

  assert fstab_version == 2

  d = {}
  for line in data.split("\n"):
    line = line.strip()
    if not line or line.startswith("#"):
      continue

    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    pieces = line.split()
    if len(pieces) != 5:
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

    # Ignore entries that are managed by vold.
    options = pieces[4]
    if "voldmanaged=" in options:
      continue

    # It's a good line, parse it.
    length = 0
    options = options.split(",")
    for i in options:
      if i.startswith("length="):
        length = int(i[7:])
      else:
        # Ignore all unknown options in the unified fstab.
        continue

    mount_flags = pieces[3]
    # Honor the SELinux context if present.
    context = None
    for i in mount_flags.split(","):
      if i.startswith("context="):
        context = i

    mount_point = pieces[1]
    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                               device=pieces[0], length=length,
                               context=context)

  # / is used for the system mount point when the root directory is included
  # in system. Other areas assume system is always at "/system" so point
  # /system at /.
  if system_root_image:
    assert '/system' not in d and '/' in d
    d["/system"] = d["/"]
  return d

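
# An illustrative v2 fstab entry the parser above accepts (device node and
# flags hypothetical):
#
#   /dev/block/by-name/system  /system  ext4  ro,context=u:object_r:rootfs:s0  length=-4096
#
# yields d["/system"] with fs_type="ext4", device="/dev/block/by-name/system",
# length=-4096 and context="context=u:object_r:rootfs:s0".
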

def DumpInfoDict(d):
  for k, v in sorted(d.items()):
    logger.info("%-25s = (%s) %s", k, type(v).__name__, v)


def AppendAVBSigningArgs(cmd, partition):
  """Append signing arguments for avbtool."""
  # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
  key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
  algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
  if key_path and algorithm:
    cmd.extend(["--key", key_path, "--algorithm", algorithm])
  avb_salt = OPTIONS.info_dict.get("avb_salt")
  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
  if avb_salt and not partition.startswith("vbmeta"):
    cmd.extend(["--salt", avb_salt])

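
# For instance, for partition "boot" with avb_boot_key_path,
# avb_boot_algorithm and avb_salt set in OPTIONS.info_dict, cmd gains
# arguments like (values illustrative):
#
#   --key external/avb/test/data/testkey_rsa2048.pem
#   --algorithm SHA256_RSA2048 --salt bd942f...
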

def GetAvbChainedPartitionArg(partition, info_dict, key=None):
  """Constructs and returns the arg to build or verify a chained partition.

  Args:
    partition: The partition name.
    info_dict: The info dict to look up the key info and rollback index
        location.
    key: The key to be used for building or verifying the partition. Defaults
        to the key listed in info_dict.

  Returns:
    A string of form "partition:rollback_index_location:key" that can be used
    to build or verify vbmeta image.
  """
  if key is None:
    key = info_dict["avb_" + partition + "_key_path"]
  pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], key)
  rollback_index_location = info_dict[
      "avb_" + partition + "_rollback_index_location"]
  return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)


def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False, two_step_image=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input
  (in 'sourcedir'), and turn them into a boot image. 'two_step_image'
  indicates if we are building a two-step special image (i.e. building a
  recovery image to be loaded into /boot in two-step OTAs).

  Return the image data, or None if sourcedir does not appear to contain
  files for building the requested image.
  """

  def make_ramdisk():
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"),
                                   os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # Use MKBOOTIMG from environ, or "mkbootimg" if empty or not set.
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "dtb")
  if os.access(fn, os.F_OK):
    cmd.append("--dtb")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args")
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args")
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  img_unsigned = None
  if info_dict.get("vboot"):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  # "boot" or "recovery", without extension.
  partition_name = os.path.basename(sourcedir).lower()

  if partition_name == "recovery":
    if info_dict.get("include_recovery_dtbo") == "true":
      fn = os.path.join(sourcedir, "recovery_dtbo")
      cmd.extend(["--recovery_dtbo", fn])
    if info_dict.get("include_recovery_acpio") == "true":
      fn = os.path.join(sourcedir, "recovery_acpio")
      cmd.extend(["--recovery_acpio", fn])

  RunAndCheckOutput(cmd)

  if (info_dict.get("boot_signer") == "true" and
      info_dict.get("verity_key")):
    # Hard-code the path as "/boot" for two-step special recovery image
    # (which will be loaded into /boot during the two-step OTA).
    if two_step_image:
      path = "/boot"
    else:
      path = "/" + partition_name
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    RunAndCheckOutput(cmd)

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot"):
    path = "/" + partition_name
    img_keyblock = tempfile.NamedTemporaryFile()
    # We have switched from the prebuilt futility binary to using the tool
    # (futility-host) built from the source. Override the setting in the old
    # TF.zip.
    futility = info_dict["futility"]
    if futility.startswith("prebuilts/"):
      futility = "futility-host"
    cmd = [info_dict["vboot_signer_cmd"], futility,
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    RunAndCheckOutput(cmd)

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
  if info_dict.get("avb_enable") == "true":
    avbtool = info_dict["avb_avbtool"]
    part_size = info_dict[partition_name + "_size"]
    cmd = [avbtool, "add_hash_footer", "--image", img.name,
           "--partition_size", str(part_size), "--partition_name",
           partition_name]
    AppendAVBSigningArgs(cmd, partition_name)
    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
    if args and args.strip():
      cmd.extend(shlex.split(args))
    RunAndCheckOutput(cmd)

  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data

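
# For a plain build (no boot_signer, vboot, or AVB), the assembled command
# resembles the following sketch (paths and values illustrative):
#
#   mkbootimg --kernel BOOT/kernel --cmdline "<contents of BOOT/cmdline>" \
#       --base 0x10000000 --pagesize 2048 --ramdisk /tmp/<ramdisk> \
#       --output /tmp/<img>
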

def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None, two_step_image=False):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt_path)

  logger.info("building image from target_files %s...", tree_subdir)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot
  # image, unless "recovery_as_boot" is specified, in which case we carry the
  # ramdisk for recovery.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk, two_step_image)
  if data:
    return File(name, data)
  return None


def Gunzip(in_filename, out_filename):
  """Gunzips the given gzip compressed file to a given output file."""
  with gzip.open(in_filename, "rb") as in_file, \
      open(out_filename, "wb") as out_file:
    shutil.copyfileobj(in_file, out_file)


def UnzipToDir(filename, dirname, patterns=None):
  """Unzips the archive to the given directory.

  Args:
    filename: The name of the zip file to unzip.
    dirname: Where the unzipped files will land.
    patterns: Files to unzip from the archive. If omitted, will unzip the
        entire archive. Non-matching patterns will be filtered out. If there's
        no match after the filtering, no file will be unzipped.
  """
  cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
  if patterns is not None:
    # Filter out non-matching patterns. unzip will complain otherwise.
    with zipfile.ZipFile(filename) as input_zip:
      names = input_zip.namelist()
    filtered = [
        pattern for pattern in patterns if fnmatch.filter(names, pattern)]

    # There are no matching files. Don't unzip anything.
    if not filtered:
      return
    cmd.extend(filtered)

  RunAndCheckOutput(cmd)


def UnzipTemp(filename, pattern=None):
  """Unzips the given archive into a temporary directory and returns the name.

  Args:
    filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into
        a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
    pattern: Files to unzip from the archive. If omitted, will unzip the
        entire archive.

  Returns:
    The name of the temporary directory.
  """

  tmp = MakeTempDir(prefix="targetfiles-")
  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    UnzipToDir(m.group(1), tmp, pattern)
    UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), pattern)
    filename = m.group(1)
  else:
    UnzipToDir(filename, tmp, pattern)

  return tmp

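
# Illustrative usage (archive name hypothetical): unzip only the entries
# needed for loading the info dict, then parse them:
#
#   tmp = UnzipTemp("target_files.zip", pattern=["META/*", "RECOVERY/*"])
#   info_dict = LoadInfoDict(tmp, repacking=True)
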

def GetUserImage(which, tmpdir, input_zip,
                 info_dict=None,
                 allow_shared_blocks=None,
                 hashtree_info_generator=None,
                 reset_file_map=False):
  """Returns an Image object suitable for passing to BlockImageDiff.

  This function loads the specified image from the given path. If the
  specified image is sparse, it also performs additional processing for OTA
  purpose. For example, it always adds block 0 to the clobbered blocks list.
  It also detects files that cannot be reconstructed from the block list, for
  which we should avoid applying imgdiff.

  Args:
    which: The partition name.
    tmpdir: The directory that contains the prebuilt image and block map file.
    input_zip: The target-files ZIP archive.
    info_dict: The dict to be looked up for relevant info.
    allow_shared_blocks: If image is sparse, whether having shared blocks is
        allowed. If None, it is looked up from info_dict.
    hashtree_info_generator: If present and image is sparse, generates the
        hashtree_info for this sparse image.
    reset_file_map: If True and image is sparse, reset file map before
        returning the image.

  Returns:
    An Image object. If it is a sparse image and reset_file_map is False, the
    image will have file_map info loaded.
  """
  if info_dict is None:
    info_dict = LoadInfoDict(input_zip)

  is_sparse = info_dict.get("extfs_sparse_flag")

  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
  # shared blocks (i.e. some blocks will show up in multiple files' block
  # list). We can only allocate such shared blocks to the first "owner", and
  # disable imgdiff for all later occurrences.
  if allow_shared_blocks is None:
    allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"

  if is_sparse:
    img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
                         hashtree_info_generator)
    if reset_file_map:
      img.ResetFileMap()
    return img
  else:
    return GetNonSparseImage(which, tmpdir, hashtree_info_generator)


def GetNonSparseImage(which, tmpdir, hashtree_info_generator=None):
  """Returns an Image object suitable for passing to BlockImageDiff.

  This function loads the specified non-sparse image from the given path.

  Args:
    which: The partition name.
    tmpdir: The directory that contains the prebuilt image and block map file.

  Returns:
    An Image object.
  """
  path = os.path.join(tmpdir, "IMAGES", which + ".img")
  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")

  # The image and map files must have been created prior to calling
  # ota_from_target_files.py (since LMP).
  assert os.path.exists(path) and os.path.exists(mappath)

  return blockimgdiff.FileImage(
      path, hashtree_info_generator=hashtree_info_generator)


def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
                   hashtree_info_generator=None):
  """Returns a SparseImage object suitable for passing to BlockImageDiff.

  This function loads the specified sparse image from the given path, and
  performs additional processing for OTA purpose. For example, it always adds
  block 0 to the clobbered blocks list. It also detects files that cannot be
  reconstructed from the block list, for which we should avoid applying
  imgdiff.

  Args:
    which: The partition name, e.g. "system", "vendor".
    tmpdir: The directory that contains the prebuilt image and block map file.
    input_zip: The target-files ZIP archive.
    allow_shared_blocks: Whether having shared blocks is allowed.
    hashtree_info_generator: If present, generates the hashtree_info for this
        sparse image.

  Returns:
    A SparseImage object, with file_map info loaded.
  """
  path = os.path.join(tmpdir, "IMAGES", which + ".img")
  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")

  # The image and map files must have been created prior to calling
  # ota_from_target_files.py (since LMP).
  assert os.path.exists(path) and os.path.exists(mappath)

  # In ext4 filesystems, block 0 might be changed even when mounted R/O. We
  # add it to clobbered_blocks so that it will be written to the target
  # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
  clobbered_blocks = "0"

  image = sparse_img.SparseImage(
      path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks,
      hashtree_info_generator=hashtree_info_generator)

  # block.map may contain fewer blocks, because mke2fs may skip allocating
  # blocks if they contain all zeros. We can't reconstruct such a file from
  # its block list. Tag such entries accordingly. (Bug: 65213616)
  for entry in image.file_map:
    # Skip artificial names, such as "__ZERO", "__NONZERO-1".
    if not entry.startswith('/'):
      continue

    # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the
    # filename listed in system.map may contain an additional leading slash
    # (i.e. "//system/framework/am.jar"). Using lstrip to get consistent
    # results.
    arcname = entry.replace(which, which.upper(), 1).lstrip('/')

    # Special handling for another case, where files not under /system
    # (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip.
    if which == 'system' and not arcname.startswith('SYSTEM'):
      arcname = 'ROOT/' + arcname

    assert arcname in input_zip.namelist(), \
        "Failed to find the ZIP entry for {}".format(entry)

    info = input_zip.getinfo(arcname)
    ranges = image.file_map[entry]

    # If a RangeSet has been tagged as using shared blocks while loading the
    # image, check the original block list to determine its completeness. Note
    # that the 'incomplete' flag would be tagged to the original RangeSet only.
    if ranges.extra.get('uses_shared_blocks'):
      ranges = ranges.extra['uses_shared_blocks']

    if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
      ranges.extra['incomplete'] = True

  return image

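
# For instance, a 10000-byte file rounds up to 12288 bytes (three 4K blocks);
# if its RangeSet covers only two blocks (8192 bytes), the entry is tagged
# 'incomplete' above.
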


def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password."""

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords))
  return key_passwords
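

# Illustrative usage sketch (hypothetical key basenames, which follow the
# OPTIONS.public_key_suffix / OPTIONS.private_key_suffix convention):
#
#   passwords = GetKeyPasswords(
#       ["build/target/product/security/releasekey",
#        "build/target/product/security/platform"])
#   # Keys without a password map to None; encrypted keys map to the
#   # password collected via PasswordManager.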


def GetMinSdkVersion(apk_name):
  """Gets the minSdkVersion declared in the APK.

  It calls 'aapt' to query the embedded minSdkVersion from the given APK file.
  This can be either a decimal number (API Level) or a codename.

  Args:
    apk_name: The APK filename.

  Returns:
    The parsed SDK version string.

  Raises:
    ExternalError: On failing to obtain the min SDK version.
  """
  proc = Run(
      ["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE,
      stderr=subprocess.PIPE)
  stdoutdata, stderrdata = proc.communicate()
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to obtain minSdkVersion: aapt return code {}:\n{}\n{}".format(
            proc.returncode, stdoutdata, stderrdata))

  for line in stdoutdata.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'.
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt")


def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Returns the minSdkVersion declared in the APK as a number (API Level).

  If minSdkVersion is set to a codename, it is translated to a number using the
  provided map.

  Args:
    apk_name: The APK filename.
    codename_to_api_level_map: A dict mapping codenames to numeric API levels.

  Returns:
    The parsed SDK version number.

  Raises:
    ExternalError: On failing to get the min SDK version number.
  """
  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    # Not a decimal number. Codename?
    if version in codename_to_api_level_map:
      return codename_to_api_level_map[version]
    else:
      raise ExternalError(
          "Unknown minSdkVersion: '{}'. Known codenames: {}".format(
              version, codename_to_api_level_map))
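

# Illustrative sketch (hypothetical APK path and codename map):
#
#   api_level = GetMinSdkVersionInt("out/dist/Example.apk", {"Q": 29})
#   # minSdkVersion '23' resolves to 23; codename 'Q' resolves to 29 via the
#   # map; an unknown codename raises ExternalError.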


def SignFile(input_name, output_name, key, password, min_api_level=None,
             codename_to_api_level_map=None, whole_file=False,
             extra_signapk_args=None):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password).

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.

  Caller may optionally specify extra args to be passed to SignApk, which
  defaults to OPTIONS.extra_signapk_args if omitted.
  """
  if codename_to_api_level_map is None:
    codename_to_api_level_map = {}
  if extra_signapk_args is None:
    extra_signapk_args = OPTIONS.extra_signapk_args

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
         ["-Djava.library.path=" + java_library_path,
          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
         extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  min_sdk_version = min_api_level
  if min_sdk_version is None:
    if not whole_file:
      min_sdk_version = GetMinSdkVersionInt(
          input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  proc = Run(cmd, stdin=subprocess.PIPE)
  if password is not None:
    password += "\n"
  stdoutdata, _ = proc.communicate(password)
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run signapk.jar: return code {}:\n{}".format(
            proc.returncode, stdoutdata))
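

# Illustrative sketch of a typical call (hypothetical paths; the password
# would normally come from GetKeyPasswords()):
#
#   SignFile("unsigned/Example.apk", "signed/Example.apk",
#            "build/target/product/security/releasekey", password=None,
#            codename_to_api_level_map={"Q": 29})
#   # Pass whole_file=True to instead embed a whole-file ("-w") signature in
#   # the zip's archive comment.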


def CheckSize(data, target, info_dict):
  """Checks the data string passed against the max size limit.

  For non-AVB images, raise an exception if the data is too big. Print a
  warning if the data is nearing the maximum size.

  For AVB images, the actual image size should be identical to the limit.

  Args:
    data: A string that contains all the data for the partition.
    target: The partition name. The ".img" suffix is optional.
    info_dict: The dict to be looked up for relevant info.
  """
  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size")
  if not fs_type or not limit:
    return

  size = len(data)
  # target could be 'userdata' or 'cache'. They should follow the non-AVB image
  # path.
  if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
    if size != limit:
      raise ExternalError(
          "Mismatching image size for %s: expected %d actual %d" % (
              target, limit, size))
  else:
    pct = float(size) * 100.0 / limit
    msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
    if pct >= 99.0:
      raise ExternalError(msg)
    elif pct >= 95.0:
      logger.warning("\n WARNING: %s\n", msg)
    else:
      logger.info(" %s", msg)
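

# Illustrative sketch (hypothetical image data; info_dict as loaded by
# LoadInfoDict()): for a non-AVB userdata image with a "userdata_size" limit,
#
#   CheckSize(userdata_img_data, "userdata.img", OPTIONS.info_dict)
#
# logs at info below 95% of the limit, warns at 95-99%, and raises
# ExternalError at 99% or above; AVB partitions must match the limit exactly.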


def ReadApkCerts(tf_zip):
  """Parses the APK certs info from a given target-files zip.

  Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns
  a tuple with the following elements: (1) a dictionary that maps packages to
  certs (based on the "certificate" and "private_key" attributes in the file);
  (2) a string representing the extension of compressed APKs in the target
  files (e.g. ".gz", ".bro").

  Args:
    tf_zip: The input target_files ZipFile (already open).

  Returns:
    (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
        the extension string of compressed APKs (e.g. ".gz"), or None if there
        are no compressed APKs.
  """
  certmap = {}
  compressed_extension = None

  # META/apkcerts.txt contains the info for _all_ the packages known at build
  # time. Filter out the ones that are not installed.
  installed_files = set()
  for name in tf_zip.namelist():
    basename = os.path.basename(name)
    if basename:
      installed_files.add(basename)

  for line in tf_zip.read('META/apkcerts.txt').decode().split('\n'):
    line = line.strip()
    if not line:
      continue
    m = re.match(
        r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
        r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*)")?$',
        line)
    if not m:
      continue

    matches = m.groupdict()
    cert = matches["CERT"]
    privkey = matches["PRIVKEY"]
    name = matches["NAME"]
    this_compressed_extension = matches["COMPRESSED"]

    public_key_suffix_len = len(OPTIONS.public_key_suffix)
    private_key_suffix_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
      certmap[name] = cert[:-public_key_suffix_len]
    else:
      raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)

    if not this_compressed_extension:
      continue

    # Only count the installed files.
    filename = name + '.' + this_compressed_extension
    if filename not in installed_files:
      continue

    # Make sure that all the values in the compression map have the same
    # extension. We don't support multiple compression methods in the same
    # system image.
    if compressed_extension:
      if this_compressed_extension != compressed_extension:
        raise ValueError(
            "Multiple compressed extensions: {} vs {}".format(
                compressed_extension, this_compressed_extension))
    else:
      compressed_extension = this_compressed_extension

  return (certmap,
          ("." + compressed_extension) if compressed_extension else None)


COMMON_DOCSTRING = """
Global options

  -p  (--path) <dir>
      Prepend <dir>/bin to the list of places to search for binaries run by
      this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the Python module containing device-specific releasetools code.

  -x  (--extra) <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""


def Usage(docstring):
  print(docstring.rstrip("\n"))
  print(COMMON_DOCSTRING)


def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler."""

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print("**", str(err), "**")
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = shlex.split(a)
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
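

# Illustrative sketch of how a releasetools script adds its own flag via
# extra_option_handler (the "--my_flag" option is hypothetical):
#
#   def option_handler(o, a):
#     if o == "--my_flag":
#       OPTIONS.extras["my_flag"] = a
#       return True   # consumed
#     return False    # unknown; ParseOptions will assert
#
#   args = ParseOptions(sys.argv[1:], __doc__,
#                       extra_long_opts=["my_flag="],
#                       extra_option_handler=option_handler)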


def MakeTempFile(prefix='tmp', suffix=''):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called.  Return the filename."""
  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  os.close(fd)
  OPTIONS.tempfiles.append(fn)
  return fn


def MakeTempDir(prefix='tmp', suffix=''):
  """Makes a temporary dir that will be cleaned up with a call to Cleanup().

  Returns:
    The absolute pathname of the new directory.
  """
  dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
  OPTIONS.tempfiles.append(dir_name)
  return dir_name


def Cleanup():
  for i in OPTIONS.tempfiles:
    if os.path.isdir(i):
      shutil.rmtree(i, ignore_errors=True)
    else:
      os.remove(i)
  del OPTIONS.tempfiles[:]


class PasswordManager(object):
  def __init__(self):
    self.editor = os.getenv("EDITOR")
    self.pwfile = os.getenv("ANDROID_PW_FILE")

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      for i in missing:
        current[i] = ""

      if not first:
        print("key file %s still missing some passwords." % (self.pwfile,))
        if sys.version_info[0] >= 3:
          raw_input = input  # pylint: disable=redefined-builtin
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.items()):
      if v:
        result[k] = v
      else:
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[ %s ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])

    return self.ReadFile()

  def ReadFile(self):
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          logger.warning("Failed to parse password file: %s", line)
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        logger.exception("Error reading password file:")
    return result
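

# Illustrative sketch of the $ANDROID_PW_FILE contents that ReadFile() parses;
# the key names and password below are hypothetical:
#
#   # Enter key passwords between the [[[ ]]] brackets.
#   [[[ hunter2 ]]] build/target/product/security/releasekey
#   [[[         ]]] build/target/product/security/platform
#
# An empty bracket pair means the password is still missing; GetPasswords()
# keeps reopening the editor until every requested key has one.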


def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger
  # than 2GiB. The Python interpreter sometimes rejects strings that large
  # (though it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  saved_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    # Note: Use of fromtimestamp rather than utcfromtimestamp here is
    # intentional. zip stores datetimes in local time without a time zone
    # attached, so we need "epoch" but in the local time zone to get 2009/01/01
    # in the zip archive.
    local_epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - local_epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, saved_stat.st_mode)
    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit


def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    if perms is None:
      perms = 0o100644
  else:
    zinfo = zinfo_or_arcname
    # Python 2 and 3 behave differently when calling ZipFile.writestr() with
    # zinfo.external_attr being 0. Python 3 uses `0o600 << 16` as the value
    # for such a case (since
    # https://github.com/python/cpython/commit/18ee29d0b870caddc0806916ca2c823254f1a1f9),
    # which seems to make more sense. Otherwise the entry will have 0o000 as
    # the permission bits. We follow the logic in Python 3 to get consistent
    # behavior between using the two versions.
    if not zinfo.external_attr:
      zinfo.external_attr = 0o600 << 16

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it takes priority.
  if perms is not None:
    # If perms doesn't set the file type, mark it as a regular file.
    if perms & 0o770000 == 0:
      perms |= 0o100000
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  zip_file.writestr(zinfo, data)
  zipfile.ZIP64_LIMIT = saved_zip64_limit
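

# Illustrative sketch: both helpers pin entry timestamps to 2009-01-01 so that
# rebuilding from the same inputs produces byte-identical zips (file names
# below are hypothetical):
#
#   out_zip = zipfile.ZipFile("out.zip", "w", zipfile.ZIP_DEFLATED)
#   ZipWrite(out_zip, "payload.img", arcname="IMAGES/payload.img")
#   ZipWriteStr(out_zip, "META/note.txt", "built deterministically\n")
#   ZipClose(out_zip)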


def ZipDelete(zip_filename, entries):
  """Deletes entries from a ZIP file.

  Since deleting entries from a ZIP file is not supported, it shells out to
  'zip -d'.

  Args:
    zip_filename: The name of the ZIP file.
    entries: The name of the entry, or the list of names to be deleted.

  Raises:
    AssertionError: In case of non-zero return from 'zip'.
  """
  if isinstance(entries, str):
    entries = [entries]
  cmd = ["zip", "-d", zip_filename] + entries
  RunAndCheckOutput(cmd)
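

# Illustrative usage (hypothetical zip and entry names); requires the 'zip'
# tool on PATH:
#
#   ZipDelete("target_files.zip", "IMAGES/system.img")
#   ZipDelete("target_files.zip", ["IMAGES/vendor.img", "IMAGES/cache.img"])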


def ZipClose(zip_file):
  # http://b/18015246
  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  # central directory.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  zip_file.close()

  zipfile.ZIP64_LIMIT = saved_zip64_limit


class DeviceSpecificParams(object):
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.items():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          info = imp.find_module("releasetools", [path])
        else:
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        logger.info("loaded device-specific extensions from %s", path)
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        logger.info("unable to load device-specific module; assuming none")

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default")
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_GetBlockDifferences(self):
    """Called during full OTA installation and verification.
    Implementation should return a list of BlockDifference objects describing
    the update on each additional partition.
    """
    return self._DoCall("FullOTA_GetBlockDifferences")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_GetBlockDifferences(self):
    """Called during incremental OTA installation and verification.
    Implementation should return a list of BlockDifference objects describing
    the update on each additional partition.
    """
    return self._DoCall("IncrementalOTA_GetBlockDifferences")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    return self._DoCall("VerifyOTA_Assertions")


class File(object):
  def __init__(self, name, data, compress_size=None):
    self.name = name
    self.data = data
    self.size = len(data)
    self.compress_size = compress_size or self.size
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    f = open(diskname, "rb")
    data = f.read()
    f.close()
    return File(name, data)

  def WriteToTemp(self):
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def WriteToDir(self, d):
    with open(os.path.join(d, self.name), "wb") as fp:
      fp.write(self.data)

  def AddToZip(self, z, compression=None):
    ZipWriteStr(z, self.name, self.data, compress_type=compression)


DIFF_PROGRAM_BY_EXT = {
    ".gz": "imgdiff",
    ".zip": ["imgdiff", "-z"],
    ".jar": ["imgdiff", "-z"],
    ".apk": ["imgdiff", "-z"],
    ".img": "imgdiff",
}


class Difference(object):
  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []

      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        logger.warning("diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if p.returncode != 0:
        logger.warning("Failure running %s:\n%s\n", diff_program, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Returns a tuple of (target_file, source_file, patch_data).

    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed.
    """
    return self.tf, self.sf, self.patch


def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  logger.info("%d diffs to compute", len(diffs))

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          logger.error("patching failed! %40s", name)
        else:
          logger.info(
              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
              tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception:
      logger.exception("Failed to compute diff from worker")
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
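

# Illustrative sketch (hypothetical file contents): compute patches for a
# batch of (target, source) pairs in parallel worker threads:
#
#   tf = File("system/app/Example.apk", new_apk_data)
#   sf = File("system/app/Example.apk", old_apk_data)
#   diffs = [Difference(tf, sf)]        # .apk => "imgdiff -z" per the map
#   ComputeDifferences(diffs)
#   _, _, patch_data = diffs[0].GetPatch()   # None if patching failed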


class BlockDifference(object):
  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    if version is None:
      version = max(
          int(i) for i in
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    assert version >= 3
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version,
                                    disable_imgdiff=self.disable_imgdiff)
    self.path = os.path.join(MakeTempDir(), partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # On devices with dynamic partitions, for new partitions,
    # src is None but OPTIONS.source_info_dict is not.
    if OPTIONS.source_info_dict is None:
      is_dynamic_build = OPTIONS.info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = False
    else:
      is_dynamic_build = OPTIONS.source_info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = partition in shlex.split(
          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())

    is_dynamic_target = partition in shlex.split(
        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())

    # For dynamic partitions builds, check partition list in both source
    # and target build because new partitions may be added, and existing
    # partitions may be removed.
    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)

    if is_dynamic:
      self.device = 'map_partition("%s")' % partition
    else:
      if OPTIONS.source_info_dict is None:
        _, device_path = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
      else:
        _, device_path = GetTypeAndDevice("/" + partition,
                                          OPTIONS.source_info_dict)
      self.device = '"%s"' % device_path

  @property
  def required_cache(self):
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None,
                  write_verify_script=False):
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)

    if write_verify_script:
      self.WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'range_sha1(%s, "%s") == "%s" && ui_print(" Verified.") || '
        'ui_print("%s has unexpected contents.");' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True),
            self.partition))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      script.AppendExtra(
          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
          '"%s.patch.dat")) then' % (
              self.device, ranges_str, expected_sha1,
              self.device, partition, partition, partition))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block(%s);' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover({device}, "{ranges}") && '
            'block_image_verify({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def WritePostInstallVerifyScript(self, script):
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'if range_sha1(%s, "%s") == "%s" then' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra(
          'if range_sha1(%s, "%s") == "%s" then' % (
              self.device, ranges_str,
              self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))

    # For full OTA, compress the new.dat with brotli with quality 6 to reduce
    # its size. Quality 9 almost triples the compression time but doesn't
    # reduce the size much further. For a typical 1.8G system.new.dat
    #                       zip  | brotli(quality 6)  | brotli(quality 9)
    #   compressed_size:    942M | 869M (~8% reduced) | 854M
    #   compression_time:   75s  | 265s               | 719s
    #   decompression_time: 15s  | 25s                | 25s

    if not self.src:
      brotli_cmd = ['brotli', '--quality=6',
                    '--output={}.new.dat.br'.format(self.path),
                    '{}.new.dat'.format(self.path)]
      print("Compressing {}.new.dat with brotli".format(self.partition))
      RunAndCheckOutput(brotli_cmd)

      new_data_name = '{}.new.dat.br'.format(self.partition)
      ZipWrite(output_zip,
               '{}.new.dat.br'.format(self.path),
               new_data_name,
               compress_type=zipfile.ZIP_STORED)
    else:
      new_data_name = '{}.new.dat'.format(self.partition)
      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)

    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{new_data_name}", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition,
                new_data_name=new_data_name, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()


DataImage = blockimgdiff.DataImage
EmptyImage = blockimgdiff.EmptyImage


# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}


def GetTypeAndDevice(mount_point, info):
  fstab = info["fstab"]
  if fstab:
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  else:
    raise KeyError


def ParseCertificate(data):
  """Parses and converts a PEM-encoded certificate into DER-encoded.

  This gives the same result as `openssl x509 -in <filename> -outform DER`.

  Returns:
    The decoded certificate bytes.
  """
  cert_buffer = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert_buffer.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  cert = base64.b64decode("".join(cert_buffer))
  return cert
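

# Illustrative sketch (hypothetical certificate path):
#
#   with open("build/target/product/security/testkey.x509.pem") as f:
#     der = ParseCertificate(f.read())
#   # 'der' matches `openssl x509 -in testkey.x509.pem -outform DER`.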


def ExtractPublicKey(cert):
  """Extracts the public key (PEM-encoded) from the given certificate file.

  Args:
    cert: The certificate filename.

  Returns:
    The public key string.

  Raises:
    AssertionError: On non-zero return from 'openssl'.
  """
  # The behavior with '-out' is different between openssl 1.1 and openssl 1.0.
  # While openssl 1.1 writes the key into the given filename followed by
  # '-out', openssl 1.0 (both of 1.0.1 and 1.0.2) doesn't. So we collect the
  # output from stdout instead.
  cmd = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert]
  proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  pubkey, stderrdata = proc.communicate()
  assert proc.returncode == 0, \
      'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata)
  return pubkey


def ExtractAvbPublicKey(avbtool, key):
  """Extracts the AVB public key from the given public or private key.

  Args:
    avbtool: The AVB tool to use.
    key: The input key file, which should be PEM-encoded public or private key.

  Returns:
    The path to the extracted AVB public key file.
  """
  output = MakeTempFile(prefix='avb-', suffix='.avbpubkey')
  RunAndCheckOutput(
      [avbtool, 'extract_public_key', "--key", key, "--output", output])
  return output
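

# Illustrative usage (hypothetical avbtool name and key path):
#
#   pubkey_path = ExtractAvbPublicKey(
#       "avbtool", "external/avb/test/data/testkey_rsa4096.pem")
#   # Runs: avbtool extract_public_key --key <key> --output <temp file>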
|
|
|
|
|
|
|
|
|
2014-02-13 19:58:24 +01:00
|
|
|
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generates the recovery-from-boot patch and writes the script to output.

  Most of the space in the boot and recovery images is just the kernel, which
  is identical for the two, so the resulting patch should be efficient. Add it
  to the output zip, along with a shell script that is run from init.rc on
  first boot to actually do the patching and install the new recovery image.

  Args:
    input_dir: The top-level input directory of the target-files.zip.
    output_sink: The callback function that writes the result.
    recovery_img: File object for the recovery image.
    boot_img: File object for the boot image.
    info_dict: A dict returned by common.LoadInfoDict() on the input
        target_files. Will use OPTIONS.info_dict if None has been given.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image") == "true"

  if full_recovery_image:
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    system_root_image = info_dict.get("system_root_image") == "true"
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    # With system-root-image, boot and recovery images will have mismatching
    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
    # to handle such a case.
    if system_root_image:
      diff_program = ["bsdiff"]
      bonus_args = ""
      assert not os.path.exists(path)
    else:
      diff_program = ["imgdiff"]
      if os.path.exists(path):
        diff_program.append("-b")
        diff_program.append(path)
        bonus_args = "--bonus /system/etc/recovery-resource.dat"
      else:
        bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    return

  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch \\
      --flash /system/etc/recovery.img \\
      --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s \\
      --patch /system/recovery-from-boot.p \\
      --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
      --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin in the L
  # release.
  sh_location = "bin/install-recovery.sh"

  logger.info("putting script in %s", sh_location)

  output_sink(sh_location, sh.encode())
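
# Typical wiring (a sketch; names and paths are hypothetical -- output_sink
# just needs to write a named blob into the target image tree):
#
#   def output_sink(fn, data):
#     with open(os.path.join(unpacked_dir, "SYSTEM", fn), "wb") as f:
#       f.write(data)
#
#   MakeRecoveryPatch(unpacked_dir, output_sink, recovery_img, boot_img)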


class DynamicPartitionUpdate(object):
  """Describes how a single dynamic partition changes between two builds."""

  def __init__(self, src_group=None, tgt_group=None, progress=None,
               block_difference=None):
    self.src_group = src_group
    self.tgt_group = tgt_group
    self.progress = progress
    self.block_difference = block_difference

  @property
  def src_size(self):
    if not self.block_difference:
      return 0
    return DynamicPartitionUpdate._GetSparseImageSize(
        self.block_difference.src)

  @property
  def tgt_size(self):
    if not self.block_difference:
      return 0
    return DynamicPartitionUpdate._GetSparseImageSize(
        self.block_difference.tgt)

  @staticmethod
  def _GetSparseImageSize(img):
    if not img:
      return 0
    return img.blocksize * img.total_blocks
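
  # For instance (hypothetical geometry): a blocksize of 4096 bytes and
  # 262144 total blocks give 4096 * 262144 = 1073741824 bytes, i.e. 1 GiB.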


class DynamicGroupUpdate(object):
  """Describes how an update group's size limit changes between two builds."""

  def __init__(self, src_size=None, tgt_size=None):
    # None: group does not exist. 0: no size limits.
    self.src_size = src_size
    self.tgt_size = tgt_size


class DynamicPartitionsDifference(object):
  """Computes the operations needed to update dynamic partitions and their
  groups from a source build to a target build.
  """

  def __init__(self, info_dict, block_diffs, progress_dict=None,
               source_info_dict=None):
    if progress_dict is None:
      progress_dict = {}

    self._remove_all_before_apply = False
    if source_info_dict is None:
      self._remove_all_before_apply = True
      source_info_dict = {}

    block_diff_dict = collections.OrderedDict(
        [(e.partition, e) for e in block_diffs])

    assert len(block_diff_dict) == len(block_diffs), \
        "Duplicated BlockDifference object for {}".format(
            [partition for partition, count in
             collections.Counter(e.partition for e in block_diffs).items()
             if count > 1])

    self._partition_updates = collections.OrderedDict()

    for p, block_diff in block_diff_dict.items():
      self._partition_updates[p] = DynamicPartitionUpdate()
      self._partition_updates[p].block_difference = block_diff

    for p, progress in progress_dict.items():
      if p in self._partition_updates:
        self._partition_updates[p].progress = progress

    tgt_groups = shlex.split(info_dict.get(
        "super_partition_groups", "").strip())
    src_groups = shlex.split(source_info_dict.get(
        "super_partition_groups", "").strip())

    for g in tgt_groups:
      for p in shlex.split(info_dict.get(
          "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in target super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].tgt_group = g

    for g in src_groups:
      for p in shlex.split(source_info_dict.get(
          "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in source super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].src_group = g

    target_dynamic_partitions = set(shlex.split(info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
                                  if u.tgt_size)
    assert block_diffs_with_target == target_dynamic_partitions, \
        "Target dynamic partitions: {}, BlockDifference with target: {}".format(
            list(target_dynamic_partitions), list(block_diffs_with_target))

    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
                                  if u.src_size)
    assert block_diffs_with_source == source_dynamic_partitions, \
        "Source dynamic partitions: {}, BlockDifference with source: {}".format(
            list(source_dynamic_partitions), list(block_diffs_with_source))

    if self._partition_updates:
      logger.info("Updating dynamic partitions %s",
                  self._partition_updates.keys())

    self._group_updates = collections.OrderedDict()

    for g in tgt_groups:
      self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].tgt_size = int(info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    for g in src_groups:
      if g not in self._group_updates:
        self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].src_size = int(source_info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    self._Compute()
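
  # The info dicts are expected to carry entries along these lines (the key
  # names are the ones read above; the values here are made up):
  #
  #   super_partition_groups=group_foo
  #   super_group_foo_partition_list=system vendor product
  #   super_group_foo_group_size=3221225472
  #   dynamic_partition_list=system vendor product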
  def WriteScript(self, script, output_zip, write_verify_script=False):
    script.Comment('--- Start patching dynamic partitions ---')
    # Patch the partitions that shrink first, so that the metadata update
    # below can safely reclaim their blocks.
    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=False)

    op_list_path = MakeTempFile()
    with open(op_list_path, 'w') as f:
      for line in self._op_list:
        f.write('{}\n'.format(line))

    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")

    script.Comment('Update dynamic partition metadata')
    script.AppendExtra('assert(update_dynamic_partitions('
                       'package_extract_file("dynamic_partitions_op_list")));')

    if write_verify_script:
      for p, u in self._partition_updates.items():
        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
          u.block_difference.WritePostInstallVerifyScript(script)
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    # Patch the partitions that stay the same size or grow, now that the
    # metadata reflects the target layout.
    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size <= u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=write_verify_script)
        if write_verify_script:
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    script.Comment('--- End patching dynamic partitions ---')
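
  # Resulting flow, in outline (a sketch, not verbatim edify output):
  #   1. patch partitions that shrink (src > tgt);
  #   2. apply dynamic_partitions_op_list via update_dynamic_partitions();
  #   3. patch partitions that stay the same size or grow (src <= tgt).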
  def _Compute(self):
    self._op_list = list()

    def append(line):
      self._op_list.append(line)

    def comment(line):
      self._op_list.append("# %s" % line)

    if self._remove_all_before_apply:
      comment('Remove all existing dynamic partitions and groups before '
              'applying full OTA')
      append('remove_all_groups')

    for p, u in self._partition_updates.items():
      if u.src_group and not u.tgt_group:
        append('remove %s' % p)

    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from %s to default' % (p, u.src_group))
        append('move %s default' % p)

    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        comment('Shrink partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %d' % (p, u.tgt_size))

    for g, u in self._group_updates.items():
      if u.src_size is not None and u.tgt_size is None:
        append('remove_group %s' % g)
      if (u.src_size is not None and u.tgt_size is not None and
          u.src_size > u.tgt_size):
        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    for g, u in self._group_updates.items():
      if u.src_size is None and u.tgt_size is not None:
        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
        append('add_group %s %d' % (g, u.tgt_size))
      if (u.src_size is not None and u.tgt_size is not None and
          u.src_size < u.tgt_size):
        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    for p, u in self._partition_updates.items():
      if u.tgt_group and not u.src_group:
        comment('Add partition %s to group %s' % (p, u.tgt_group))
        append('add %s %s' % (p, u.tgt_group))

    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size < u.tgt_size:
        comment('Grow partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %d' % (p, u.tgt_size))

    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from default to %s' % (p, u.tgt_group))
        append('move %s %s' % (p, u.tgt_group))
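
# A generated dynamic_partitions_op_list might read, for a hypothetical update
# that grows "system" inside group "group_foo" (sizes invented):
#
#   # Grow group group_foo from 2147483648 to 3221225472
#   resize_group group_foo 3221225472
#   # Grow partition system from 536870912 to 1073741824
#   resize system 1073741824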