2011-10-29 02:02:30 +02:00
|
|
|
#!/usr/bin/env python
|
|
|
|
#
|
|
|
|
# Copyright (C) 2011 The Android Open Source Project
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
"""
|
2017-12-07 19:33:00 +01:00
|
|
|
Builds output_image from the given input_directory, properties_file,
|
|
|
|
and writes the image to target_output_directory.
|
2011-10-29 02:02:30 +02:00
|
|
|
|
2018-06-19 01:32:35 +02:00
|
|
|
If argument generated_prop_file exists, write additional properties to the file.
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
Usage: build_image.py input_directory properties_file output_image \\
|
2018-06-19 01:32:35 +02:00
|
|
|
target_output_directory [generated_prop_file]
|
2011-10-29 02:02:30 +02:00
|
|
|
"""
|
2017-12-07 19:33:00 +01:00
|
|
|
|
|
|
|
from __future__ import print_function
|
|
|
|
|
2011-10-29 02:02:30 +02:00
|
|
|
import os
|
2012-11-27 03:10:23 +01:00
|
|
|
import os.path
|
2015-06-23 20:16:05 +02:00
|
|
|
import re
|
2017-12-07 19:33:00 +01:00
|
|
|
import shlex
|
|
|
|
import shutil
|
2011-10-29 02:02:30 +02:00
|
|
|
import subprocess
|
|
|
|
import sys
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2015-06-10 00:48:14 +02:00
|
|
|
import common
|
2016-02-09 21:28:58 +01:00
|
|
|
import sparse_img
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2011-10-29 02:02:30 +02:00
|
|
|
|
2015-06-10 00:48:14 +02:00
|
|
|
OPTIONS = common.OPTIONS
|
|
|
|
|
2014-05-17 04:14:30 +02:00
|
|
|
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
|
2015-05-20 08:30:57 +02:00
|
|
|
BLOCK_SIZE = 4096
|
2018-06-19 01:32:35 +02:00
|
|
|
BYTES_IN_MB = 1024 * 1024
|
2014-05-17 04:14:30 +02:00
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2018-06-19 01:32:35 +02:00
|
|
|
def RunCommand(cmd, verbose=None, env=None):
  """Echo and run the given command.

  Args:
    cmd: the command represented as a list of strings.
    verbose: show commands being executed.
    env: a dictionary of additional environment variables.
  Returns:
    A tuple of the output and the exit code.
  """
  if verbose is None:
    verbose = OPTIONS.verbose

  # Extra environment variables are layered on top of the current process
  # environment rather than replacing it.
  merged_env = None
  if env is not None:
    merged_env = os.environ.copy()
    merged_env.update(env)

  if verbose:
    print("Running: " + " ".join(cmd))

  proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT, env=merged_env)
  output, _ = proc.communicate()
  if verbose:
    print(output.rstrip())
  return (output, proc.returncode)
|
2011-10-29 02:02:30 +02:00
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2015-05-20 08:30:57 +02:00
|
|
|
def GetVerityFECSize(partition_size):
  """Asks the 'fec' tool for the FEC data size needed for partition_size.

  Returns:
    A tuple (True, size in bytes) on success, (False, 0) on failure.
  """
  output, exit_code = RunCommand(["fec", "-s", str(partition_size)], False)
  if exit_code:
    return False, 0
  return True, int(output)
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2013-06-17 02:26:08 +02:00
|
|
|
def GetVerityTreeSize(partition_size):
  """Asks build_verity_tree for the hash tree size for partition_size.

  Returns:
    A tuple (True, size in bytes) on success, (False, 0) on failure.
  """
  output, exit_code = RunCommand(
      ["build_verity_tree", "-s", str(partition_size)], False)
  if exit_code:
    return False, 0
  return True, int(output)
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2013-06-17 02:26:08 +02:00
|
|
|
def GetVerityMetadataSize(partition_size):
  """Asks build_verity_metadata.py for the metadata size for partition_size.

  Returns:
    A tuple (True, size in bytes) on success, (False, 0) on failure.
  """
  output, exit_code = RunCommand(
      ["build_verity_metadata.py", "size", str(partition_size)], False)
  if exit_code:
    return False, 0
  return True, int(output)
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2015-05-20 08:30:57 +02:00
|
|
|
def GetVeritySize(partition_size, fec_supported):
  """Returns the total verity overhead (tree + metadata, plus FEC if enabled).

  Returns 0 if any of the underlying size queries fails.
  """
  ok, tree_size = GetVerityTreeSize(partition_size)
  if not ok:
    return 0
  ok, metadata_size = GetVerityMetadataSize(partition_size)
  if not ok:
    return 0

  total = tree_size + metadata_size
  if not fec_supported:
    return total

  # FEC covers the partition data plus the verity data that follows it.
  ok, fec_size = GetVerityFECSize(partition_size + total)
  return total + fec_size if ok else 0
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2018-06-19 01:32:35 +02:00
|
|
|
def GetDiskUsage(path):
  """Return number of bytes that "path" occupies on host.

  Args:
    path: The directory or file to calculate size on
  Returns:
    True and the number of bytes if successful,
    False and 0 otherwise.
  """
  # POSIXLY_CORRECT forces du to report in 512-byte blocks.
  output, exit_code = RunCommand(["du", "-s", path], verbose=False,
                                 env={"POSIXLY_CORRECT": "1"})
  if exit_code != 0:
    return False, 0
  # POSIX du returns number of blocks with block size 512
  return True, int(output.split()[0]) * 512
|
|
|
|
|
|
|
|
|
2016-02-09 21:28:58 +01:00
|
|
|
def GetSimgSize(image_file):
  """Returns the raw (unsparsed) size in bytes of a sparse image file."""
  image = sparse_img.SparseImage(image_file, build_map=False)
  return image.total_blocks * image.blocksize
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2016-02-09 21:28:58 +01:00
|
|
|
def ZeroPadSimg(image_file, pad_size):
  """Appends pad_size bytes of zero-fill blocks to a sparse image in place."""
  num_blocks = pad_size // BLOCK_SIZE
  print("Padding %d blocks (%d bytes)" % (num_blocks, pad_size))
  image = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
  image.AppendFillChunk(0, num_blocks)
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2017-05-19 17:44:26 +02:00
|
|
|
def AVBCalcMaxImageSize(avbtool, footer_type, partition_size, additional_args):
  """Calculates max image size for a given partition size.

  Args:
    avbtool: String with path to avbtool.
    footer_type: 'hash' or 'hashtree' for generating footer.
    partition_size: The size of the partition in question.
    additional_args: Additional arguments to pass to 'avbtool
        add_hashtree_image'.
  Returns:
    The maximum image size or 0 if an error occurred.
  """
  cmd = [avbtool, "add_%s_footer" % footer_type,
         "--partition_size", partition_size, "--calc_max_image_size"]
  cmd += shlex.split(additional_args)

  output, exit_code = RunCommand(cmd)
  return int(output) if exit_code == 0 else 0
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2017-05-19 17:44:26 +02:00
|
|
|
def AVBAddFooter(image_path, avbtool, footer_type, partition_size,
                 partition_name, key_path, algorithm, salt,
                 additional_args):
  """Adds dm-verity hashtree and AVB metadata to an image.

  Args:
    image_path: Path to image to modify.
    avbtool: String with path to avbtool.
    footer_type: 'hash' or 'hashtree' for generating footer.
    partition_size: The size of the partition in question.
    partition_name: The name of the partition - will be embedded in metadata.
    key_path: Path to key to use or None.
    algorithm: Name of algorithm to use or None.
    salt: The salt to use (a hexadecimal string) or None.
    additional_args: Additional arguments to pass to 'avbtool
        add_hashtree_image'.

  Returns:
    True if the operation succeeded.
  """
  cmd = [avbtool, "add_%s_footer" % footer_type,
         "--partition_size", partition_size,
         "--partition_name", partition_name,
         "--image", image_path]

  # Signing is optional: only done when both a key and an algorithm are given.
  if key_path and algorithm:
    cmd += ["--key", key_path, "--algorithm", algorithm]
  if salt:
    cmd += ["--salt", salt]
  cmd += shlex.split(additional_args)

  _, exit_code = RunCommand(cmd)
  return exit_code == 0
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2015-05-20 08:30:57 +02:00
|
|
|
def AdjustPartitionSizeForVerity(partition_size, fec_supported):
  """Modifies the provided partition size to account for the verity metadata.

  This information is used to size the created image appropriately.

  Args:
    partition_size: the size of the partition to be verified.
    fec_supported: True if forward-error-correction data is also carved out
        of the partition; its size is included in the verity overhead.

  Returns:
    A tuple of the size of the partition adjusted for verity metadata, and
    the size of verity metadata.
  """
  # Results are memoized per (partition_size, fec_supported) pair; the bool
  # formats as 0/1 here, keeping the key distinct and stable.
  key = "%d %d" % (partition_size, fec_supported)
  if key in AdjustPartitionSizeForVerity.results:
    return AdjustPartitionSizeForVerity.results[key]

  # Upper bound for the search: partition size aligned down to a block.
  hi = partition_size
  if hi % BLOCK_SIZE != 0:
    hi = (hi // BLOCK_SIZE) * BLOCK_SIZE

  # verity tree and fec sizes depend on the partition size, which
  # means this estimate is always going to be unnecessarily small
  verity_size = GetVeritySize(hi, fec_supported)
  lo = partition_size - verity_size
  result = lo

  # do a binary search for the optimal size
  while lo < hi:
    # Midpoint, aligned down to a BLOCK_SIZE boundary.
    i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
    v = GetVeritySize(i, fec_supported)
    if i + v <= partition_size:
      # This candidate fits together with its verity data: keep the best
      # size seen so far and continue searching upward.
      if result < i:
        result = i
        verity_size = v
      lo = i + BLOCK_SIZE
    else:
      hi = i

  if OPTIONS.verbose:
    print("Adjusted partition size for verity, partition_size: {},"
          " verity_size: {}".format(result, verity_size))
  AdjustPartitionSizeForVerity.results[key] = (result, verity_size)
  return (result, verity_size)


# Per-process memoization cache for AdjustPartitionSizeForVerity.
AdjustPartitionSizeForVerity.results = {}
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2016-09-02 00:58:35 +02:00
|
|
|
def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
                   padding_size):
  """Generates forward-error-correction data with the 'fec' tool.

  Returns:
    True on success, False otherwise.
  """
  fec_cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
             verity_path, verity_fec_path]
  output, exit_code = RunCommand(fec_cmd)
  if exit_code == 0:
    return True
  print("Could not build FEC data! Error: %s" % output)
  return False
|
2013-06-17 02:26:08 +02:00
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2014-04-17 03:49:56 +02:00
|
|
|
def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
  """Builds the verity hash tree and stores root hash / salt in prop_dict.

  Returns:
    True on success, False otherwise.
  """
  tree_cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
              verity_image_path]
  output, exit_code = RunCommand(tree_cmd)
  if exit_code != 0:
    print("Could not build verity tree! Error: %s" % output)
    return False

  # The tool prints "<root hash> <salt>" on success.
  root_hash, salt_value = output.split()
  prop_dict["verity_root_hash"] = root_hash
  prop_dict["verity_salt"] = salt_value
  return True
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2013-06-17 02:26:08 +02:00
|
|
|
def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
                        block_device, signer_path, key, signer_args,
                        verity_disable):
  """Writes the dm-verity metadata block via build_verity_metadata.py.

  Returns:
    True on success, False otherwise.
  """
  cmd = ["build_verity_metadata.py", "build", str(image_size),
         verity_metadata_path, root_hash, salt, block_device, signer_path, key]
  if signer_args:
    cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
  if verity_disable:
    cmd.append("--verity_disable")

  output, exit_code = RunCommand(cmd)
  if exit_code == 0:
    return True
  print("Could not build verity metadata! Error: %s" % output)
  return False
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2013-06-17 02:26:08 +02:00
|
|
|
def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
  """Appends the unsparse image to the given sparse image.

  Args:
    sparse_image_path: the path to the (sparse) image
    unsparse_image_path: the path to the (unsparse) image
    error_message: message printed (with the tool output) on failure
  Returns:
    True on success, False on failure.
  """
  output, exit_code = RunCommand(
      ["append2simg", sparse_image_path, unsparse_image_path])
  if exit_code == 0:
    return True
  print("%s: %s" % (error_message, output))
  return False
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2015-12-18 14:24:56 +01:00
|
|
|
def Append(target, file_to_append, error_message):
  """Appends file_to_append to target.

  Prints error_message and returns False on IOError; True otherwise.
  """
  try:
    # Open the destination first (append mode), matching the side effect of
    # creating it even when the source turns out to be unreadable.
    with open(target, "a") as out_file, open(file_to_append, "r") as input_file:
      out_file.writelines(input_file)
  except IOError:
    print(error_message)
    return False
  return True
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2015-03-24 03:13:21 +01:00
|
|
|
def BuildVerifiedImage(data_image_path, verity_image_path,
                       verity_metadata_path, verity_fec_path,
                       padding_size, fec_supported):
  """Concatenates verity metadata (and FEC, if enabled) onto the image.

  Returns:
    True on success, False otherwise.
  """
  if not Append(verity_image_path, verity_metadata_path,
                "Could not append verity metadata!"):
    return False

  if fec_supported:
    # build FEC for the entire partition, including metadata
    if not BuildVerityFEC(data_image_path, verity_image_path,
                          verity_fec_path, padding_size):
      return False
    if not Append(verity_image_path, verity_fec_path, "Could not append FEC!"):
      return False

  return Append2Simg(data_image_path, verity_image_path,
                     "Could not append verity data!")
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2013-12-06 02:09:18 +01:00
|
|
|
def UnsparseImage(sparse_image_path, replace=True):
  """Inflates a sparse image into a raw ("unsparse_*") file next to it.

  Args:
    sparse_image_path: path to the sparse image to inflate.
    replace: if True, regenerate an existing unsparse file; if False, reuse it.
  Returns:
    (True, path to the unsparse image) on success, (False, None) on failure.
  """
  img_dir = os.path.dirname(sparse_image_path)
  unsparse_image_path = os.path.join(
      img_dir, "unsparse_" + os.path.basename(sparse_image_path))

  if os.path.exists(unsparse_image_path):
    if not replace:
      return True, unsparse_image_path
    os.unlink(unsparse_image_path)

  inflate_command = ["simg2img", sparse_image_path, unsparse_image_path]
  inflate_output, exit_code = RunCommand(inflate_command)
  if exit_code != 0:
    print("Error: '%s' failed with exit code %d:\n%s" % (
        inflate_command, exit_code, inflate_output))
    # Remove the partially-written output so a later run starts clean.
    os.remove(unsparse_image_path)
    return False, None
  return True, unsparse_image_path
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2015-05-20 08:30:57 +02:00
|
|
|
def MakeVerityEnabledImage(out_file, fec_supported, prop_dict):
  """Creates an image that is verifiable using dm-verity.

  Args:
    out_file: the location to write the verifiable image at
    fec_supported: True if forward-error-correction data should also be
        generated and appended to the image
    prop_dict: a dictionary of properties required for image creation and
               verification
  Returns:
    True on success, False otherwise.
  """
  # get properties
  image_size = int(prop_dict["partition_size"])
  block_dev = prop_dict["verity_block_device"]
  signer_key = prop_dict["verity_key"] + ".pk8"
  # A signer path given on the command line overrides the one in prop_dict.
  if OPTIONS.verity_signer_path is not None:
    signer_path = OPTIONS.verity_signer_path
  else:
    signer_path = prop_dict["verity_signer_cmd"]
  signer_args = OPTIONS.verity_signer_args

  # make a tempdir
  tempdir_name = common.MakeTempDir(suffix="_verity_images")

  # get partial image paths
  verity_image_path = os.path.join(tempdir_name, "verity.img")
  verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
  verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")

  # build the verity tree and get the root hash and salt
  # (BuildVerityTree stores "verity_root_hash" / "verity_salt" in prop_dict)
  if not BuildVerityTree(out_file, verity_image_path, prop_dict):
    return False

  # build the metadata blocks
  root_hash = prop_dict["verity_root_hash"]
  salt = prop_dict["verity_salt"]
  verity_disable = "verity_disable" in prop_dict
  if not BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
                             block_dev, signer_path, signer_key, signer_args,
                             verity_disable):
    return False

  # build the full verified image
  target_size = int(prop_dict["original_partition_size"])
  verity_size = int(prop_dict["verity_size"])

  # Padding fills the gap between filesystem + verity data and the original
  # (full) partition size; it must never be negative.
  padding_size = target_size - image_size - verity_size
  assert padding_size >= 0

  if not BuildVerifiedImage(out_file,
                            verity_image_path,
                            verity_metadata_path,
                            verity_fec_path,
                            padding_size,
                            fec_supported):
    return False

  return True
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
|
2016-03-03 06:07:23 +01:00
|
|
|
def ConvertBlockMapToBaseFs(block_map_file):
  """Converts a block map file into a base_fs file via blk_alloc_to_base_fs.

  Returns:
    The path of the generated base_fs file on success, None on failure.
  """
  base_fs_file = common.MakeTempFile(prefix="script_gen_", suffix=".base_fs")
  _, exit_code = RunCommand(
      ["blk_alloc_to_base_fs", block_map_file, base_fs_file])
  if exit_code != 0:
    return None
  return base_fs_file
|
2016-03-03 06:07:23 +01:00
|
|
|
|
2017-12-08 08:01:25 +01:00
|
|
|
|
|
|
|
def CheckHeadroom(ext4fs_output, prop_dict):
|
|
|
|
"""Checks if there's enough headroom space available.
|
|
|
|
|
|
|
|
Headroom is the reserved space on system image (via PRODUCT_SYSTEM_HEADROOM),
|
|
|
|
which is useful for devices with low disk space that have system image
|
|
|
|
variation between builds. The 'partition_headroom' in prop_dict is the size
|
|
|
|
in bytes, while the numbers in 'ext4fs_output' are for 4K-blocks.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
ext4fs_output: The output string from mke2fs command.
|
|
|
|
prop_dict: The property dict.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
The check result.
|
2018-01-03 06:19:27 +01:00
|
|
|
|
|
|
|
Raises:
|
|
|
|
AssertionError: On invalid input.
|
2017-12-08 08:01:25 +01:00
|
|
|
"""
|
2018-01-03 06:19:27 +01:00
|
|
|
assert ext4fs_output is not None
|
|
|
|
assert prop_dict.get('fs_type', '').startswith('ext4')
|
|
|
|
assert 'partition_headroom' in prop_dict
|
|
|
|
assert 'mount_point' in prop_dict
|
|
|
|
|
2017-12-08 08:01:25 +01:00
|
|
|
ext4fs_stats = re.compile(
|
|
|
|
r'Created filesystem with .* (?P<used_blocks>[0-9]+)/'
|
|
|
|
r'(?P<total_blocks>[0-9]+) blocks')
|
2017-12-07 19:33:00 +01:00
|
|
|
last_line = ext4fs_output.strip().split('\n')[-1]
|
|
|
|
m = ext4fs_stats.match(last_line)
|
2017-12-08 08:01:25 +01:00
|
|
|
used_blocks = int(m.groupdict().get('used_blocks'))
|
|
|
|
total_blocks = int(m.groupdict().get('total_blocks'))
|
2018-01-03 06:19:27 +01:00
|
|
|
headroom_blocks = int(prop_dict['partition_headroom']) / BLOCK_SIZE
|
2017-12-08 08:01:25 +01:00
|
|
|
adjusted_blocks = total_blocks - headroom_blocks
|
|
|
|
if used_blocks > adjusted_blocks:
|
2018-01-03 06:19:27 +01:00
|
|
|
mount_point = prop_dict["mount_point"]
|
2017-12-08 08:01:25 +01:00
|
|
|
print("Error: Not enough room on %s (total: %d blocks, used: %d blocks, "
|
|
|
|
"headroom: %d blocks, available: %d blocks)" % (
|
|
|
|
mount_point, total_blocks, used_blocks, headroom_blocks,
|
|
|
|
adjusted_blocks))
|
|
|
|
return False
|
|
|
|
return True
|
|
|
|
|
|
|
|
|
2015-07-09 18:54:55 +02:00
|
|
|
def BuildImage(in_dir, prop_dict, out_file, target_out=None):
|
2011-10-29 02:02:30 +02:00
|
|
|
"""Build an image to out_file from in_dir with property prop_dict.
|
2018-06-19 01:32:35 +02:00
|
|
|
After the function call, values in prop_dict is updated with
|
|
|
|
computed values.
|
2011-10-29 02:02:30 +02:00
|
|
|
|
|
|
|
Args:
|
|
|
|
in_dir: path of input directory.
|
|
|
|
prop_dict: property dictionary.
|
|
|
|
out_file: path of the output image file.
|
2017-12-07 19:33:00 +01:00
|
|
|
target_out: path of the product out directory to read device specific FS
|
|
|
|
config files.
|
2011-10-29 02:02:30 +02:00
|
|
|
|
|
|
|
Returns:
|
|
|
|
True iff the image is built successfully.
|
|
|
|
"""
|
2015-04-01 20:21:55 +02:00
|
|
|
# system_root_image=true: build a system.img that combines the contents of
|
2018-07-21 00:20:28 +02:00
|
|
|
# /system and root, which should be mounted at the root of the file system.
|
2015-03-25 03:07:40 +01:00
|
|
|
origin_in = in_dir
|
|
|
|
fs_config = prop_dict.get("fs_config")
|
2017-12-07 19:33:00 +01:00
|
|
|
if (prop_dict.get("system_root_image") == "true" and
|
|
|
|
prop_dict["mount_point"] == "system"):
|
2017-12-25 19:43:47 +01:00
|
|
|
in_dir = common.MakeTempDir()
|
2017-12-07 19:33:00 +01:00
|
|
|
# Change the mount point to "/".
|
2015-03-25 03:07:40 +01:00
|
|
|
prop_dict["mount_point"] = "/"
|
|
|
|
if fs_config:
|
2018-07-21 00:20:28 +02:00
|
|
|
# We need to merge the fs_config files of system and root.
|
|
|
|
merged_fs_config = common.MakeTempFile(prefix="merged_fs_config",
|
2017-12-25 19:43:47 +01:00
|
|
|
suffix=".txt")
|
2015-03-25 03:07:40 +01:00
|
|
|
with open(merged_fs_config, "w") as fw:
|
2018-07-21 00:20:28 +02:00
|
|
|
if "root_fs_config" in prop_dict:
|
|
|
|
with open(prop_dict["root_fs_config"]) as fr:
|
2015-03-25 03:07:40 +01:00
|
|
|
fw.writelines(fr.readlines())
|
|
|
|
with open(fs_config) as fr:
|
|
|
|
fw.writelines(fr.readlines())
|
|
|
|
fs_config = merged_fs_config
|
|
|
|
|
2011-10-29 02:02:30 +02:00
|
|
|
build_command = []
|
|
|
|
fs_type = prop_dict.get("fs_type", "")
|
2017-12-07 19:33:00 +01:00
|
|
|
run_e2fsck = False
|
2013-06-17 02:26:08 +02:00
|
|
|
|
2015-03-24 20:42:03 +01:00
|
|
|
fs_spans_partition = True
|
|
|
|
if fs_type.startswith("squash"):
|
2015-06-23 20:16:05 +02:00
|
|
|
fs_spans_partition = False
|
2015-03-24 20:42:03 +01:00
|
|
|
|
2014-07-11 00:42:38 +02:00
|
|
|
is_verity_partition = "verity_block_device" in prop_dict
|
2014-05-06 07:19:37 +02:00
|
|
|
verity_supported = prop_dict.get("verity") == "true"
|
2015-05-20 08:30:57 +02:00
|
|
|
verity_fec_supported = prop_dict.get("verity_fec") == "true"
|
|
|
|
|
2018-06-19 01:32:35 +02:00
|
|
|
if (prop_dict.get("use_logical_partitions") == "true" and
|
|
|
|
"partition_size" not in prop_dict):
|
|
|
|
# if partition_size is not defined, use output of `du' + reserved_size
|
|
|
|
success, size = GetDiskUsage(origin_in)
|
|
|
|
if not success:
|
|
|
|
return False
|
|
|
|
if OPTIONS.verbose:
|
|
|
|
print("The tree size of %s is %d MB." % (origin_in, size // BYTES_IN_MB))
|
|
|
|
size += int(prop_dict.get("partition_reserved_size", 0))
|
|
|
|
# Round this up to a multiple of 4K so that avbtool works
|
|
|
|
size = common.RoundUpTo4K(size)
|
|
|
|
prop_dict["partition_size"] = str(size)
|
|
|
|
if OPTIONS.verbose:
|
|
|
|
print("Allocating %d MB for %s." % (size // BYTES_IN_MB, out_file))
|
|
|
|
|
2015-06-23 20:16:05 +02:00
|
|
|
# Adjust the partition size to make room for the hashes if this is to be
|
|
|
|
# verified.
|
2016-02-09 21:28:58 +01:00
|
|
|
if verity_supported and is_verity_partition:
|
2013-06-17 02:26:08 +02:00
|
|
|
partition_size = int(prop_dict.get("partition_size"))
|
2017-12-07 19:33:00 +01:00
|
|
|
(adjusted_size, verity_size) = AdjustPartitionSizeForVerity(
|
|
|
|
partition_size, verity_fec_supported)
|
2013-06-17 02:26:08 +02:00
|
|
|
if not adjusted_size:
|
|
|
|
return False
|
|
|
|
prop_dict["partition_size"] = str(adjusted_size)
|
|
|
|
prop_dict["original_partition_size"] = str(partition_size)
|
2016-09-02 00:58:35 +02:00
|
|
|
prop_dict["verity_size"] = str(verity_size)
|
2013-06-17 02:26:08 +02:00
|
|
|
|
2017-05-19 17:44:26 +02:00
|
|
|
# Adjust partition size for AVB hash footer or AVB hashtree footer.
|
|
|
|
avb_footer_type = ''
|
|
|
|
if prop_dict.get("avb_hash_enable") == "true":
|
|
|
|
avb_footer_type = 'hash'
|
|
|
|
elif prop_dict.get("avb_hashtree_enable") == "true":
|
|
|
|
avb_footer_type = 'hashtree'
|
|
|
|
|
|
|
|
if avb_footer_type:
|
2017-05-26 12:30:04 +02:00
|
|
|
avbtool = prop_dict["avb_avbtool"]
|
|
|
|
partition_size = prop_dict["partition_size"]
|
2017-05-19 17:44:26 +02:00
|
|
|
# avb_add_hash_footer_args or avb_add_hashtree_footer_args.
|
|
|
|
additional_args = prop_dict["avb_add_" + avb_footer_type + "_footer_args"]
|
2017-12-07 19:33:00 +01:00
|
|
|
max_image_size = AVBCalcMaxImageSize(avbtool, avb_footer_type,
|
|
|
|
partition_size, additional_args)
|
2016-09-30 23:29:22 +02:00
|
|
|
if max_image_size == 0:
|
|
|
|
return False
|
|
|
|
prop_dict["partition_size"] = str(max_image_size)
|
2017-05-26 12:30:04 +02:00
|
|
|
prop_dict["original_partition_size"] = partition_size
|
2016-09-30 23:29:22 +02:00
|
|
|
|
2011-10-29 02:02:30 +02:00
|
|
|
if fs_type.startswith("ext"):
|
2016-11-19 02:06:29 +01:00
|
|
|
build_command = [prop_dict["ext_mkuserimg"]]
|
2011-10-29 02:02:30 +02:00
|
|
|
if "extfs_sparse_flag" in prop_dict:
|
|
|
|
build_command.append(prop_dict["extfs_sparse_flag"])
|
2017-12-07 19:33:00 +01:00
|
|
|
run_e2fsck = True
|
2011-10-29 02:02:30 +02:00
|
|
|
build_command.extend([in_dir, out_file, fs_type,
|
|
|
|
prop_dict["mount_point"]])
|
2013-12-06 00:54:55 +01:00
|
|
|
build_command.append(prop_dict["partition_size"])
|
2014-11-19 03:03:13 +01:00
|
|
|
if "journal_size" in prop_dict:
|
|
|
|
build_command.extend(["-j", prop_dict["journal_size"]])
|
2013-12-06 00:54:55 +01:00
|
|
|
if "timestamp" in prop_dict:
|
|
|
|
build_command.extend(["-T", str(prop_dict["timestamp"])])
|
2015-03-25 03:07:40 +01:00
|
|
|
if fs_config:
|
2014-06-16 18:10:55 +02:00
|
|
|
build_command.extend(["-C", fs_config])
|
2015-07-09 18:54:55 +02:00
|
|
|
if target_out:
|
|
|
|
build_command.extend(["-D", target_out])
|
2015-03-25 03:07:40 +01:00
|
|
|
if "block_list" in prop_dict:
|
|
|
|
build_command.extend(["-B", prop_dict["block_list"]])
|
2016-03-03 06:07:23 +01:00
|
|
|
if "base_fs_file" in prop_dict:
|
|
|
|
base_fs_file = ConvertBlockMapToBaseFs(prop_dict["base_fs_file"])
|
|
|
|
if base_fs_file is None:
|
|
|
|
return False
|
|
|
|
build_command.extend(["-d", base_fs_file])
|
2014-12-17 21:34:12 +01:00
|
|
|
build_command.extend(["-L", prop_dict["mount_point"]])
|
2016-10-20 19:58:12 +02:00
|
|
|
if "extfs_inode_count" in prop_dict:
|
|
|
|
build_command.extend(["-i", prop_dict["extfs_inode_count"]])
|
2018-03-23 19:36:43 +01:00
|
|
|
if "extfs_rsv_pct" in prop_dict:
|
|
|
|
build_command.extend(["-M", prop_dict["extfs_rsv_pct"]])
|
2017-01-06 01:48:14 +01:00
|
|
|
if "flash_erase_block_size" in prop_dict:
|
|
|
|
build_command.extend(["-e", prop_dict["flash_erase_block_size"]])
|
|
|
|
if "flash_logical_block_size" in prop_dict:
|
|
|
|
build_command.extend(["-o", prop_dict["flash_logical_block_size"]])
|
2017-09-23 00:45:33 +02:00
|
|
|
# Specify UUID and hash_seed if using mke2fs.
|
|
|
|
if prop_dict["ext_mkuserimg"] == "mkuserimg_mke2fs.sh":
|
|
|
|
if "uuid" in prop_dict:
|
|
|
|
build_command.extend(["-U", prop_dict["uuid"]])
|
|
|
|
if "hash_seed" in prop_dict:
|
|
|
|
build_command.extend(["-S", prop_dict["hash_seed"]])
|
2018-01-22 22:15:46 +01:00
|
|
|
if "ext4_share_dup_blocks" in prop_dict:
|
|
|
|
build_command.append("-c")
|
2015-03-25 03:07:40 +01:00
|
|
|
if "selinux_fc" in prop_dict:
|
2012-04-08 19:42:34 +02:00
|
|
|
build_command.append(prop_dict["selinux_fc"])
|
2015-03-03 21:30:37 +01:00
|
|
|
elif fs_type.startswith("squash"):
|
|
|
|
build_command = ["mksquashfsimage.sh"]
|
|
|
|
build_command.extend([in_dir, out_file])
|
2015-12-16 03:00:14 +01:00
|
|
|
if "squashfs_sparse_flag" in prop_dict:
|
|
|
|
build_command.extend([prop_dict["squashfs_sparse_flag"]])
|
2015-03-03 21:30:37 +01:00
|
|
|
build_command.extend(["-m", prop_dict["mount_point"]])
|
2015-07-09 18:54:55 +02:00
|
|
|
if target_out:
|
|
|
|
build_command.extend(["-d", target_out])
|
2016-04-08 07:10:51 +02:00
|
|
|
if fs_config:
|
|
|
|
build_command.extend(["-C", fs_config])
|
2015-03-25 03:07:40 +01:00
|
|
|
if "selinux_fc" in prop_dict:
|
2015-03-03 21:30:37 +01:00
|
|
|
build_command.extend(["-c", prop_dict["selinux_fc"]])
|
2016-06-13 18:46:58 +02:00
|
|
|
if "block_list" in prop_dict:
|
|
|
|
build_command.extend(["-B", prop_dict["block_list"]])
|
2018-01-20 00:51:46 +01:00
|
|
|
if "squashfs_block_size" in prop_dict:
|
|
|
|
build_command.extend(["-b", prop_dict["squashfs_block_size"]])
|
2015-06-17 21:35:15 +02:00
|
|
|
if "squashfs_compressor" in prop_dict:
|
|
|
|
build_command.extend(["-z", prop_dict["squashfs_compressor"]])
|
|
|
|
if "squashfs_compressor_opt" in prop_dict:
|
|
|
|
build_command.extend(["-zo", prop_dict["squashfs_compressor_opt"]])
|
2017-12-07 19:33:00 +01:00
|
|
|
if prop_dict.get("squashfs_disable_4k_align") == "true":
|
2016-06-16 00:53:07 +02:00
|
|
|
build_command.extend(["-a"])
|
2014-06-16 23:17:40 +02:00
|
|
|
elif fs_type.startswith("f2fs"):
|
|
|
|
build_command = ["mkf2fsuserimg.sh"]
|
|
|
|
build_command.extend([out_file, prop_dict["partition_size"]])
|
2017-11-29 04:21:28 +01:00
|
|
|
if fs_config:
|
|
|
|
build_command.extend(["-C", fs_config])
|
|
|
|
build_command.extend(["-f", in_dir])
|
|
|
|
if target_out:
|
|
|
|
build_command.extend(["-D", target_out])
|
|
|
|
if "selinux_fc" in prop_dict:
|
|
|
|
build_command.extend(["-s", prop_dict["selinux_fc"]])
|
|
|
|
build_command.extend(["-t", prop_dict["mount_point"]])
|
|
|
|
if "timestamp" in prop_dict:
|
|
|
|
build_command.extend(["-T", str(prop_dict["timestamp"])])
|
|
|
|
build_command.extend(["-L", prop_dict["mount_point"]])
|
2011-10-29 02:02:30 +02:00
|
|
|
else:
|
2016-06-16 02:04:54 +02:00
|
|
|
print("Error: unknown filesystem type '%s'" % (fs_type))
|
|
|
|
return False
|
2011-10-29 02:02:30 +02:00
|
|
|
|
2015-03-25 03:07:40 +01:00
|
|
|
if in_dir != origin_in:
|
|
|
|
# Construct a staging directory of the root file system.
|
2018-07-21 00:20:28 +02:00
|
|
|
root_dir = prop_dict.get("root_dir")
|
|
|
|
if root_dir:
|
2015-03-25 03:07:40 +01:00
|
|
|
shutil.rmtree(in_dir)
|
2018-07-21 00:20:28 +02:00
|
|
|
shutil.copytree(root_dir, in_dir, symlinks=True)
|
2015-03-25 03:07:40 +01:00
|
|
|
staging_system = os.path.join(in_dir, "system")
|
|
|
|
shutil.rmtree(staging_system, ignore_errors=True)
|
|
|
|
shutil.copytree(origin_in, staging_system, symlinks=True)
|
2015-06-23 20:16:05 +02:00
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
(mkfs_output, exit_code) = RunCommand(build_command)
|
2012-11-27 03:10:23 +01:00
|
|
|
if exit_code != 0:
|
2018-01-19 19:29:52 +01:00
|
|
|
print("Error: '%s' failed with exit code %d:\n%s" % (
|
|
|
|
build_command, exit_code, mkfs_output))
|
2018-06-19 01:32:35 +02:00
|
|
|
success, du = GetDiskUsage(origin_in)
|
|
|
|
du_str = ("%d bytes (%d MB)" % (du, du // BYTES_IN_MB)
|
|
|
|
) if success else "unknown"
|
2018-07-23 22:05:00 +02:00
|
|
|
print(
|
|
|
|
"Out of space? The tree size of {} is {}, with reserved space of {} "
|
|
|
|
"bytes ({} MB).".format(
|
|
|
|
origin_in, du_str,
|
|
|
|
int(prop_dict.get("partition_reserved_size", 0)),
|
|
|
|
int(prop_dict.get("partition_reserved_size", 0)) // BYTES_IN_MB))
|
|
|
|
if "original_partition_size" in prop_dict:
|
|
|
|
print(
|
|
|
|
"The max size for filsystem files is {} bytes ({} MB), out of a "
|
|
|
|
"total image size of {} bytes ({} MB).".format(
|
|
|
|
int(prop_dict["partition_size"]),
|
|
|
|
int(prop_dict["partition_size"]) // BYTES_IN_MB,
|
|
|
|
int(prop_dict["original_partition_size"]),
|
|
|
|
int(prop_dict["original_partition_size"]) // BYTES_IN_MB))
|
|
|
|
else:
|
|
|
|
print("The max image size is {} bytes ({} MB).".format(
|
|
|
|
int(prop_dict["partition_size"]),
|
|
|
|
int(prop_dict["partition_size"]) // BYTES_IN_MB))
|
2012-11-27 03:10:23 +01:00
|
|
|
return False
|
|
|
|
|
2017-12-08 08:01:25 +01:00
|
|
|
# Check if there's enough headroom space available for ext4 image.
|
2017-12-07 23:07:44 +01:00
|
|
|
if "partition_headroom" in prop_dict and fs_type.startswith("ext4"):
|
2017-12-07 19:33:00 +01:00
|
|
|
if not CheckHeadroom(mkfs_output, prop_dict):
|
2015-06-23 20:16:05 +02:00
|
|
|
return False
|
|
|
|
|
2015-03-24 20:42:03 +01:00
|
|
|
if not fs_spans_partition:
|
|
|
|
mount_point = prop_dict.get("mount_point")
|
|
|
|
partition_size = int(prop_dict.get("partition_size"))
|
2016-02-09 21:28:58 +01:00
|
|
|
image_size = GetSimgSize(out_file)
|
2015-03-24 20:42:03 +01:00
|
|
|
if image_size > partition_size:
|
2015-06-23 20:16:05 +02:00
|
|
|
print("Error: %s image size of %d is larger than partition size of "
|
|
|
|
"%d" % (mount_point, image_size, partition_size))
|
|
|
|
return False
|
2015-03-24 20:42:03 +01:00
|
|
|
if verity_supported and is_verity_partition:
|
2016-02-09 21:28:58 +01:00
|
|
|
ZeroPadSimg(out_file, partition_size - image_size)
|
2015-03-24 20:42:03 +01:00
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
# Create the verified image if this is to be verified.
|
2014-05-06 07:19:37 +02:00
|
|
|
if verity_supported and is_verity_partition:
|
2015-05-20 08:30:57 +02:00
|
|
|
if not MakeVerityEnabledImage(out_file, verity_fec_supported, prop_dict):
|
2013-06-17 02:26:08 +02:00
|
|
|
return False
|
|
|
|
|
2017-05-19 17:44:26 +02:00
|
|
|
# Add AVB HASH or HASHTREE footer (metadata).
|
|
|
|
if avb_footer_type:
|
2017-05-26 12:30:04 +02:00
|
|
|
avbtool = prop_dict["avb_avbtool"]
|
|
|
|
original_partition_size = prop_dict["original_partition_size"]
|
2016-09-30 23:29:22 +02:00
|
|
|
partition_name = prop_dict["partition_name"]
|
2017-05-26 12:30:04 +02:00
|
|
|
# key_path and algorithm are only available when chain partition is used.
|
|
|
|
key_path = prop_dict.get("avb_key_path")
|
|
|
|
algorithm = prop_dict.get("avb_algorithm")
|
2017-09-28 02:17:43 +02:00
|
|
|
salt = prop_dict.get("avb_salt")
|
2017-05-19 17:44:26 +02:00
|
|
|
# avb_add_hash_footer_args or avb_add_hashtree_footer_args
|
|
|
|
additional_args = prop_dict["avb_add_" + avb_footer_type + "_footer_args"]
|
2017-12-07 19:33:00 +01:00
|
|
|
if not AVBAddFooter(out_file, avbtool, avb_footer_type,
|
|
|
|
original_partition_size, partition_name, key_path,
|
|
|
|
algorithm, salt, additional_args):
|
2016-09-30 23:29:22 +02:00
|
|
|
return False
|
|
|
|
|
2017-12-07 19:33:00 +01:00
|
|
|
if run_e2fsck and prop_dict.get("skip_fsck") != "true":
|
2013-12-06 02:09:18 +01:00
|
|
|
success, unsparse_image = UnsparseImage(out_file, replace=False)
|
2013-06-17 02:26:08 +02:00
|
|
|
if not success:
|
2012-11-27 03:10:23 +01:00
|
|
|
return False
|
|
|
|
|
|
|
|
# Run e2fsck on the inflated image file
|
|
|
|
e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
|
2018-01-19 19:29:52 +01:00
|
|
|
(e2fsck_output, exit_code) = RunCommand(e2fsck_command)
|
2012-11-27 03:10:23 +01:00
|
|
|
|
|
|
|
os.remove(unsparse_image)
|
|
|
|
|
2017-12-06 21:16:39 +01:00
|
|
|
if exit_code != 0:
|
2018-01-19 19:29:52 +01:00
|
|
|
print("Error: '%s' failed with exit code %d:\n%s" % (
|
|
|
|
e2fsck_command, exit_code, e2fsck_output))
|
2017-12-06 21:16:39 +01:00
|
|
|
return False
|
|
|
|
|
|
|
|
return True
|
2011-10-29 02:02:30 +02:00
|
|
|
|
|
|
|
|
|
|
|
def ImagePropFromGlobalDict(glob_dict, mount_point):
  """Build an image property dictionary from the global dictionary.

  Args:
    glob_dict: the global dictionary from the build system.
    mount_point: such as "system", "data" etc.

  Returns:
    A dict with the subset of properties (as strings) that BuildImage()
    needs for this mount point.
  """
  d = {}

  # Propagate the build timestamp, when available, so image metadata can be
  # stamped consistently with the rest of the build.
  if "build.prop" in glob_dict:
    bp = glob_dict["build.prop"]
    if "ro.build.date.utc" in bp:
      d["timestamp"] = bp["ro.build.date.utc"]

  def copy_prop(src_p, dest_p):
    """Copy a property from the global dictionary.

    Args:
      src_p: The source property in the global dictionary.
      dest_p: The destination property.
    Returns:
      True if property was found and copied, False otherwise.
    """
    if src_p in glob_dict:
      d[dest_p] = str(glob_dict[src_p])
      return True
    return False

  # Properties shared by all images, independent of the mount point.
  common_props = (
      "extfs_sparse_flag",
      "squashfs_sparse_flag",
      "selinux_fc",
      "skip_fsck",
      "ext_mkuserimg",
      "verity",
      "verity_key",
      "verity_signer_cmd",
      "verity_fec",
      "verity_disable",
      "avb_enable",
      "avb_avbtool",
      "avb_salt",
      "use_logical_partitions",
  )
  for p in common_props:
    copy_prop(p, p)

  # Per-mount-point properties. Each branch maps the partition-specific
  # global names (e.g. "system_size") onto the generic names BuildImage()
  # reads (e.g. "partition_size").
  d["mount_point"] = mount_point
  if mount_point == "system":
    copy_prop("avb_system_hashtree_enable", "avb_hashtree_enable")
    copy_prop("avb_system_add_hashtree_footer_args",
              "avb_add_hashtree_footer_args")
    copy_prop("avb_system_key_path", "avb_key_path")
    copy_prop("avb_system_algorithm", "avb_algorithm")
    copy_prop("fs_type", "fs_type")
    # Copy the generic system fs type first, override with specific one if
    # available.
    copy_prop("system_fs_type", "fs_type")
    copy_prop("system_headroom", "partition_headroom")
    copy_prop("system_size", "partition_size")
    if not copy_prop("system_journal_size", "journal_size"):
      d["journal_size"] = "0"
    copy_prop("system_verity_block_device", "verity_block_device")
    copy_prop("system_root_image", "system_root_image")
    copy_prop("root_dir", "root_dir")
    copy_prop("root_fs_config", "root_fs_config")
    copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
    copy_prop("system_squashfs_compressor", "squashfs_compressor")
    copy_prop("system_squashfs_compressor_opt", "squashfs_compressor_opt")
    copy_prop("system_squashfs_block_size", "squashfs_block_size")
    copy_prop("system_squashfs_disable_4k_align", "squashfs_disable_4k_align")
    copy_prop("system_base_fs_file", "base_fs_file")
    copy_prop("system_extfs_inode_count", "extfs_inode_count")
    if not copy_prop("system_extfs_rsv_pct", "extfs_rsv_pct"):
      d["extfs_rsv_pct"] = "0"
    copy_prop("system_reserved_size", "partition_reserved_size")
  elif mount_point == "system_other":
    # We inherit the selinux policies of /system since we contain some of its
    # files.
    d["mount_point"] = "system"
    copy_prop("avb_system_hashtree_enable", "avb_hashtree_enable")
    copy_prop("avb_system_add_hashtree_footer_args",
              "avb_add_hashtree_footer_args")
    copy_prop("avb_system_key_path", "avb_key_path")
    copy_prop("avb_system_algorithm", "avb_algorithm")
    copy_prop("fs_type", "fs_type")
    copy_prop("system_fs_type", "fs_type")
    copy_prop("system_size", "partition_size")
    if not copy_prop("system_journal_size", "journal_size"):
      d["journal_size"] = "0"
    copy_prop("system_verity_block_device", "verity_block_device")
    copy_prop("system_squashfs_compressor", "squashfs_compressor")
    copy_prop("system_squashfs_compressor_opt", "squashfs_compressor_opt")
    copy_prop("system_squashfs_block_size", "squashfs_block_size")
    copy_prop("system_base_fs_file", "base_fs_file")
    copy_prop("system_extfs_inode_count", "extfs_inode_count")
    if not copy_prop("system_extfs_rsv_pct", "extfs_rsv_pct"):
      d["extfs_rsv_pct"] = "0"
    copy_prop("system_reserved_size", "partition_reserved_size")
  elif mount_point == "data":
    # Copy the generic fs type first, override with specific one if available.
    copy_prop("fs_type", "fs_type")
    copy_prop("userdata_fs_type", "fs_type")
    copy_prop("userdata_size", "partition_size")
    copy_prop("flash_logical_block_size", "flash_logical_block_size")
    copy_prop("flash_erase_block_size", "flash_erase_block_size")
  elif mount_point == "cache":
    copy_prop("cache_fs_type", "fs_type")
    copy_prop("cache_size", "partition_size")
  elif mount_point == "vendor":
    copy_prop("avb_vendor_hashtree_enable", "avb_hashtree_enable")
    copy_prop("avb_vendor_add_hashtree_footer_args",
              "avb_add_hashtree_footer_args")
    copy_prop("avb_vendor_key_path", "avb_key_path")
    copy_prop("avb_vendor_algorithm", "avb_algorithm")
    copy_prop("vendor_fs_type", "fs_type")
    copy_prop("vendor_size", "partition_size")
    if not copy_prop("vendor_journal_size", "journal_size"):
      d["journal_size"] = "0"
    copy_prop("vendor_verity_block_device", "verity_block_device")
    copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
    copy_prop("vendor_squashfs_compressor", "squashfs_compressor")
    copy_prop("vendor_squashfs_compressor_opt", "squashfs_compressor_opt")
    copy_prop("vendor_squashfs_block_size", "squashfs_block_size")
    copy_prop("vendor_squashfs_disable_4k_align", "squashfs_disable_4k_align")
    copy_prop("vendor_base_fs_file", "base_fs_file")
    copy_prop("vendor_extfs_inode_count", "extfs_inode_count")
    if not copy_prop("vendor_extfs_rsv_pct", "extfs_rsv_pct"):
      d["extfs_rsv_pct"] = "0"
    copy_prop("vendor_reserved_size", "partition_reserved_size")
  elif mount_point == "product":
    copy_prop("avb_product_hashtree_enable", "avb_hashtree_enable")
    copy_prop("avb_product_add_hashtree_footer_args",
              "avb_add_hashtree_footer_args")
    copy_prop("avb_product_key_path", "avb_key_path")
    copy_prop("avb_product_algorithm", "avb_algorithm")
    copy_prop("product_fs_type", "fs_type")
    copy_prop("product_size", "partition_size")
    if not copy_prop("product_journal_size", "journal_size"):
      d["journal_size"] = "0"
    copy_prop("product_verity_block_device", "verity_block_device")
    copy_prop("product_squashfs_compressor", "squashfs_compressor")
    copy_prop("product_squashfs_compressor_opt", "squashfs_compressor_opt")
    copy_prop("product_squashfs_block_size", "squashfs_block_size")
    copy_prop("product_squashfs_disable_4k_align", "squashfs_disable_4k_align")
    copy_prop("product_base_fs_file", "base_fs_file")
    copy_prop("product_extfs_inode_count", "extfs_inode_count")
    if not copy_prop("product_extfs_rsv_pct", "extfs_rsv_pct"):
      d["extfs_rsv_pct"] = "0"
    copy_prop("product_reserved_size", "partition_reserved_size")
  elif mount_point == "product-services":
    copy_prop("avb_productservices_hashtree_enable", "avb_hashtree_enable")
    copy_prop("avb_productservices_add_hashtree_footer_args",
              "avb_add_hashtree_footer_args")
    copy_prop("avb_productservices_key_path", "avb_key_path")
    copy_prop("avb_productservices_algorithm", "avb_algorithm")
    copy_prop("productservices_fs_type", "fs_type")
    copy_prop("productservices_size", "partition_size")
    if not copy_prop("productservices_journal_size", "journal_size"):
      d["journal_size"] = "0"
    copy_prop("productservices_verity_block_device", "verity_block_device")
    copy_prop("productservices_squashfs_compressor", "squashfs_compressor")
    copy_prop("productservices_squashfs_compressor_opt",
              "squashfs_compressor_opt")
    copy_prop("productservices_squashfs_block_size", "squashfs_block_size")
    copy_prop("productservices_squashfs_disable_4k_align",
              "squashfs_disable_4k_align")
    copy_prop("productservices_base_fs_file", "base_fs_file")
    copy_prop("productservices_extfs_inode_count", "extfs_inode_count")
    if not copy_prop("productservices_extfs_rsv_pct", "extfs_rsv_pct"):
      d["extfs_rsv_pct"] = "0"
    copy_prop("productservices_reserved_size", "partition_reserved_size")
  elif mount_point == "oem":
    copy_prop("fs_type", "fs_type")
    copy_prop("oem_size", "partition_size")
    if not copy_prop("oem_journal_size", "journal_size"):
      d["journal_size"] = "0"
    copy_prop("oem_extfs_inode_count", "extfs_inode_count")
    if not copy_prop("oem_extfs_rsv_pct", "extfs_rsv_pct"):
      d["extfs_rsv_pct"] = "0"
  d["partition_name"] = mount_point
  return d
|
|
|
|
|
|
|
|
|
|
|
|
def LoadGlobalDict(filename):
  """Load "name=value" pairs from filename.

  Blank lines and lines starting with '#' are skipped. Only the first '='
  splits key from value, so values may themselves contain '='.

  Args:
    filename: Path to the properties file to read.

  Returns:
    A dict mapping each property name to its value (both strings).
  """
  d = {}
  # Use a context manager so the file handle is released even if a malformed
  # line (no '=') raises; the original open()/close() pair leaked on error.
  with open(filename) as f:
    for line in f:
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      k, v = line.split("=", 1)
      d[k] = v
  return d
|
|
|
|
|
|
|
|
|
2018-06-19 01:32:35 +02:00
|
|
|
def GlobalDictFromImageProp(image_prop, mount_point):
  """Map image properties back to build-system (global) property names.

  Picks the partition size out of image_prop and returns it under the
  mount-point-specific global name (e.g. "system_size"). Mount points with
  no known mapping yield an empty dict.

  Args:
    image_prop: The image property dict produced for this partition.
    mount_point: The mount point, e.g. "system" or "vendor".

  Returns:
    A dict with at most one entry: the size property for this mount point.
  """
  # Prefer the pre-footer size when present; otherwise fall back to the
  # plain partition size.
  source_key = ("original_partition_size"
                if "original_partition_size" in image_prop
                else "partition_size")

  dest_key_by_mount_point = {
      "system": "system_size",
      "system_other": "system_size",
      "vendor": "vendor_size",
      "product": "product_size",
      "product-services": "productservices_size",
  }

  result = {}
  dest_key = dest_key_by_mount_point.get(mount_point)
  if dest_key is not None and source_key in image_prop:
    result[dest_key] = image_prop[source_key]
  return result
|
|
|
|
|
|
|
|
|
|
|
|
def SaveGlobalDict(filename, glob_dict):
  """Write "name=value" pairs from glob_dict to filename, one per line.

  Inverse of LoadGlobalDict(): each entry is newline-terminated so the file
  can be parsed back line by line.

  Args:
    filename: Path of the output properties file (truncated/overwritten).
    glob_dict: Dict of property name -> value.
  """
  with open(filename, "w") as f:
    # Terminate each entry with "\n"; without it, consecutive pairs run
    # together on a single line and cannot be re-parsed by LoadGlobalDict().
    f.writelines("%s=%s\n" % (key, value)
                 for (key, value) in glob_dict.items())
|
|
|
|
|
|
|
|
|
2011-10-29 02:02:30 +02:00
|
|
|
def main(argv):
  """Entry point: build output_image from input_directory + properties_file.

  Args:
    argv: [input_directory, properties_file, output_image,
           target_output_directory, (optional) generated_prop_file].

  Exits with status 1 on bad arguments, unknown image name, or build failure.
  """
  if len(argv) < 4 or len(argv) > 5:
    print(__doc__)
    sys.exit(1)

  in_dir = argv[0]
  glob_dict_file = argv[1]
  out_file = argv[2]
  target_out = argv[3]
  prop_file_out = argv[4] if len(argv) >= 5 else None

  glob_dict = LoadGlobalDict(glob_dict_file)
  if "mount_point" in glob_dict:
    # The caller knows the mount point and provides a dictionary needed by
    # BuildImage().
    image_properties = glob_dict
    # Fix: bind mount_point on this path too. It was previously left unbound
    # here, raising NameError in the GlobalDictFromImageProp() call below
    # whenever a generated_prop_file was requested.
    mount_point = glob_dict["mount_point"]
  else:
    # Infer the mount point from the output image's file name.
    image_filename = os.path.basename(out_file)
    mount_point_by_filename = {
        "system.img": "system",
        "system_other.img": "system_other",
        "userdata.img": "data",
        "cache.img": "cache",
        "vendor.img": "vendor",
        "oem.img": "oem",
        "product.img": "product",
        "product-services.img": "product-services",
    }
    mount_point = mount_point_by_filename.get(image_filename)
    if mount_point is None:
      print("error: unknown image file name ", image_filename, file=sys.stderr)
      sys.exit(1)

    image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)

  if not BuildImage(in_dir, image_properties, out_file, target_out):
    print("error: failed to build %s from %s" % (out_file, in_dir),
          file=sys.stderr)
    sys.exit(1)

  if prop_file_out:
    # Export the (possibly adjusted) size properties back to the build system.
    glob_dict_out = GlobalDictFromImageProp(image_properties, mount_point)
    SaveGlobalDict(prop_file_out, glob_dict_out)
|
2011-10-29 02:02:30 +02:00
|
|
|
|
|
|
|
if __name__ == '__main__':
  try:
    main(sys.argv[1:])
  finally:
    # Always run the cleanup hook from the shared `common` module (presumably
    # removes temp files registered during the build — verify against common
    # module), even when main() terminates via sys.exit().
    common.Cleanup()
|