#!/usr/bin/env python
#
# Copyright (C) 2011 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Build image output_image_file from input_directory and properties_file.

Usage: build_image input_directory properties_file output_image_file

"""
import os
import os.path
import re
import subprocess
import sys
import commands
import shutil
import tempfile

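# Salt fed to build_verity_tree below. Using a fixed, publicly known value
# appears to be a deliberate choice so that hash-tree generation stays
# reproducible across builds of the same input; it is not a secret.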
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"

def RunCommand(cmd):
  """Echo and run the given command.

  Args:
    cmd: the command represented as a list of strings.
  Returns:
    A tuple of the output and the exit code.
  """
  print "Running: ", " ".join(cmd)
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  output, _ = p.communicate()
  print "%s" % (output.rstrip(),)
  return (output, p.returncode)

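# The next two helpers shell out to ask how much space the verity hash tree
# and the verity metadata block will occupy for a partition of the given size.
# Each returns a (success, size) tuple; the sizes are subtracted directly from
# the partition size in AdjustPartitionSizeForVerity() below.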
def GetVerityTreeSize(partition_size):
  cmd = "build_verity_tree -s %d"
  cmd %= partition_size
  status, output = commands.getstatusoutput(cmd)
  if status:
    print output
    return False, 0
  return True, int(output)

def GetVerityMetadataSize(partition_size):
  cmd = "system/extras/verity/build_verity_metadata.py -s %d"
  cmd %= partition_size
  status, output = commands.getstatusoutput(cmd)
  if status:
    print output
    return False, 0
  return True, int(output)

def AdjustPartitionSizeForVerity(partition_size):
  """Modifies the provided partition size to account for the verity metadata.

  This information is used to size the created image appropriately.
  Args:
    partition_size: the size of the partition to be verified.
  Returns:
    The size of the partition adjusted for verity metadata.
  """
  success, verity_tree_size = GetVerityTreeSize(partition_size)
  if not success:
    return 0
  success, verity_metadata_size = GetVerityMetadataSize(partition_size)
  if not success:
    return 0
  return partition_size - verity_tree_size - verity_metadata_size

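# Builds the verity hash tree for sparse_image_path into verity_image_path
# using the fixed salt, and stores the resulting root hash and salt in
# prop_dict ("verity_root_hash" / "verity_salt") for metadata generation.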
def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
  cmd = "build_verity_tree -A %s %s %s" % (
      FIXED_SALT, sparse_image_path, verity_image_path)
  print cmd
  status, output = commands.getstatusoutput(cmd)
  if status:
    print "Could not build verity tree! Error: %s" % output
    return False
  root, salt = output.split()
  prop_dict["verity_root_hash"] = root
  prop_dict["verity_salt"] = salt
  return True

def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
                        block_device, signer_path, key):
  cmd_template = (
      "system/extras/verity/build_verity_metadata.py %s %s %s %s %s %s %s")
  cmd = cmd_template % (image_size, verity_metadata_path, root_hash, salt,
                        block_device, signer_path, key)
  print cmd
  status, output = commands.getstatusoutput(cmd)
  if status:
    print "Could not build verity metadata! Error: %s" % output
    return False
  return True

def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
  """Appends the unsparse image to the given sparse image.

  Args:
    sparse_image_path: the path to the (sparse) image
    unsparse_image_path: the path to the (unsparse) image
    error_message: the message to print if the append fails
  Returns:
    True on success, False on failure.
  """
  cmd = "append2simg %s %s"
  cmd %= (sparse_image_path, unsparse_image_path)
  print cmd
  status, output = commands.getstatusoutput(cmd)
  if status:
    print "%s: %s" % (error_message, output)
    return False
  return True

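# Appends the verity metadata and then the verity hash tree onto the data
# image, so the final layout is: filesystem data, verity metadata, hash tree.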
def BuildVerifiedImage(data_image_path, verity_image_path,
                       verity_metadata_path):
  if not Append2Simg(data_image_path, verity_metadata_path,
                     "Could not append verity metadata!"):
    return False
  if not Append2Simg(data_image_path, verity_image_path,
                     "Could not append verity tree!"):
    return False
  return True

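# Inflates a sparse image into a raw "unsparse_<name>" copy next to it (via
# simg2img) so that tools such as e2fsck can operate on it. Returns a
# (success, unsparse_image_path) tuple.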
def UnsparseImage(sparse_image_path, replace=True):
  img_dir = os.path.dirname(sparse_image_path)
  unsparse_image_path = "unsparse_" + os.path.basename(sparse_image_path)
  unsparse_image_path = os.path.join(img_dir, unsparse_image_path)
  if os.path.exists(unsparse_image_path):
    if replace:
      os.unlink(unsparse_image_path)
    else:
      return True, unsparse_image_path
  inflate_command = ["simg2img", sparse_image_path, unsparse_image_path]
  (_, exit_code) = RunCommand(inflate_command)
  if exit_code != 0:
    os.remove(unsparse_image_path)
    return False, None
  return True, unsparse_image_path

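# Given an already-built filesystem image at out_file, this computes the hash
# tree with BuildVerityTree(), generates signed metadata with
# BuildVerityMetadata(), and appends both to the image via BuildVerifiedImage().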
def MakeVerityEnabledImage(out_file, prop_dict):
  """Creates an image that is verifiable using dm-verity.

  Args:
    out_file: the location to write the verifiable image at
    prop_dict: a dictionary of properties required for image creation and
               verification
  Returns:
    True on success, False otherwise.
  """
  # get properties
  image_size = prop_dict["partition_size"]
  block_dev = prop_dict["verity_block_device"]
  signer_key = prop_dict["verity_key"] + ".pk8"
  signer_path = prop_dict["verity_signer_cmd"]

  # make a tempdir
  tempdir_name = tempfile.mkdtemp(suffix="_verity_images")

  # get partial image paths
  verity_image_path = os.path.join(tempdir_name, "verity.img")
  verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")

  # build the verity tree and get the root hash and salt
  if not BuildVerityTree(out_file, verity_image_path, prop_dict):
    shutil.rmtree(tempdir_name, ignore_errors=True)
    return False

  # build the metadata blocks
  root_hash = prop_dict["verity_root_hash"]
  salt = prop_dict["verity_salt"]
  if not BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
                             block_dev, signer_path, signer_key):
    shutil.rmtree(tempdir_name, ignore_errors=True)
    return False

  # build the full verified image
  if not BuildVerifiedImage(out_file,
                            verity_image_path,
                            verity_metadata_path):
    shutil.rmtree(tempdir_name, ignore_errors=True)
    return False

  shutil.rmtree(tempdir_name, ignore_errors=True)
  return True

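# At a high level BuildImage(): optionally stages a combined root/system tree,
# picks a mkfs helper based on fs_type (ext*, squashfs, f2fs or yaffs2), runs
# it, enforces the partition size limits, and finally adds dm-verity data and
# runs e2fsck where requested.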
def BuildImage(in_dir, prop_dict, out_file):
  """Build an image to out_file from in_dir with property prop_dict.

  Args:
    in_dir: path of input directory.
    prop_dict: property dictionary.
    out_file: path of the output image file.

  Returns:
    True iff the image is built successfully.
  """
  # system_root_image=true: build a system.img that combines the contents of
  # /system and the ramdisk, and can be mounted at the root of the file system.
  origin_in = in_dir
  fs_config = prop_dict.get("fs_config")
  if (prop_dict.get("system_root_image") == "true"
      and prop_dict["mount_point"] == "system"):
    in_dir = tempfile.mkdtemp()
    # Change the mount point to "/"
    prop_dict["mount_point"] = "/"
    if fs_config:
      # We need to merge the fs_config files of system and ramdisk.
      fd, merged_fs_config = tempfile.mkstemp(prefix="root_fs_config",
                                              suffix=".txt")
      os.close(fd)
      with open(merged_fs_config, "w") as fw:
        if "ramdisk_fs_config" in prop_dict:
          with open(prop_dict["ramdisk_fs_config"]) as fr:
            fw.writelines(fr.readlines())
        with open(fs_config) as fr:
          fw.writelines(fr.readlines())
      fs_config = merged_fs_config

  build_command = []
  fs_type = prop_dict.get("fs_type", "")
  run_fsck = False

  fs_spans_partition = True
  if fs_type.startswith("squash"):
    fs_spans_partition = False

  is_verity_partition = "verity_block_device" in prop_dict
  verity_supported = prop_dict.get("verity") == "true"
  # Adjust the partition size to make room for the hashes if this is to be
  # verified.
  if verity_supported and is_verity_partition and fs_spans_partition:
    partition_size = int(prop_dict.get("partition_size"))
    adjusted_size = AdjustPartitionSizeForVerity(partition_size)
    if not adjusted_size:
      return False
    prop_dict["partition_size"] = str(adjusted_size)
    prop_dict["original_partition_size"] = str(partition_size)

  if fs_type.startswith("ext"):
    build_command = ["mkuserimg.sh"]
    if "extfs_sparse_flag" in prop_dict:
      build_command.append(prop_dict["extfs_sparse_flag"])
      run_fsck = True
    build_command.extend([in_dir, out_file, fs_type,
                          prop_dict["mount_point"]])
    build_command.append(prop_dict["partition_size"])
    if "journal_size" in prop_dict:
      build_command.extend(["-j", prop_dict["journal_size"]])
    if "timestamp" in prop_dict:
      build_command.extend(["-T", str(prop_dict["timestamp"])])
    if fs_config:
      build_command.extend(["-C", fs_config])
    if "block_list" in prop_dict:
      build_command.extend(["-B", prop_dict["block_list"]])
    build_command.extend(["-L", prop_dict["mount_point"]])
    if "selinux_fc" in prop_dict:
      build_command.append(prop_dict["selinux_fc"])
  elif fs_type.startswith("squash"):
    build_command = ["mksquashfsimage.sh"]
    build_command.extend([in_dir, out_file])
    build_command.extend(["-s"])
    build_command.extend(["-m", prop_dict["mount_point"]])
    if "selinux_fc" in prop_dict:
      build_command.extend(["-c", prop_dict["selinux_fc"]])
  elif fs_type.startswith("f2fs"):
    build_command = ["mkf2fsuserimg.sh"]
    build_command.extend([out_file, prop_dict["partition_size"]])
  else:
    build_command = ["mkyaffs2image", "-f"]
    if prop_dict.get("mkyaffs2_extra_flags", None):
      build_command.extend(prop_dict["mkyaffs2_extra_flags"].split())
    build_command.append(in_dir)
    build_command.append(out_file)
    if "selinux_fc" in prop_dict:
      build_command.append(prop_dict["selinux_fc"])
      build_command.append(prop_dict["mount_point"])

  if in_dir != origin_in:
    # Construct a staging directory of the root file system.
    ramdisk_dir = prop_dict.get("ramdisk_dir")
    if ramdisk_dir:
      shutil.rmtree(in_dir)
      shutil.copytree(ramdisk_dir, in_dir, symlinks=True)
    staging_system = os.path.join(in_dir, "system")
    shutil.rmtree(staging_system, ignore_errors=True)
    shutil.copytree(origin_in, staging_system, symlinks=True)

  reserved_blocks = prop_dict.get("has_ext4_reserved_blocks") == "true"
  ext4fs_output = None

  try:
    if reserved_blocks and fs_type.startswith("ext4"):
      (ext4fs_output, exit_code) = RunCommand(build_command)
    else:
      (_, exit_code) = RunCommand(build_command)
  finally:
    if in_dir != origin_in:
      # Clean up temporary directories and files.
      shutil.rmtree(in_dir, ignore_errors=True)
      if fs_config:
        os.remove(fs_config)
  if exit_code != 0:
    return False

  # Bug: 21522719, 22023465
  # There are some reserved blocks on ext4 FS (lesser of 4096 blocks and 2%).
  # We need to deduct those blocks from the available space, since they are
  # not writable even with root privilege. It only affects devices using
  # file-based OTA and a kernel version of 3.10 or greater (currently just
  # sprout).
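  # For example, a 1 GiB ext4 image with 4 KiB blocks has 262144 blocks in
  # total; 2% of that is 5242 blocks, so the reserved count is capped at the
  # smaller value of 4096 blocks.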
  if reserved_blocks and fs_type.startswith("ext4"):
    assert ext4fs_output is not None
    ext4fs_stats = re.compile(
        r'Created filesystem with .* (?P<used_blocks>[0-9]+)/'
        r'(?P<total_blocks>[0-9]+) blocks')
    m = ext4fs_stats.match(ext4fs_output.strip().split('\n')[-1])
    used_blocks = int(m.groupdict().get('used_blocks'))
    total_blocks = int(m.groupdict().get('total_blocks'))
    reserved_blocks = min(4096, int(total_blocks * 0.02))
    adjusted_blocks = total_blocks - reserved_blocks
    if used_blocks > adjusted_blocks:
      mount_point = prop_dict.get("mount_point")
      print("Error: Not enough room on %s (total: %d blocks, used: %d blocks, "
            "reserved: %d blocks, available: %d blocks)" % (
                mount_point, total_blocks, used_blocks, reserved_blocks,
                adjusted_blocks))
      return False

  if not fs_spans_partition:
    mount_point = prop_dict.get("mount_point")
    partition_size = int(prop_dict.get("partition_size"))
    image_size = os.stat(out_file).st_size
    if image_size > partition_size:
      print("Error: %s image size of %d is larger than partition size of "
            "%d" % (mount_point, image_size, partition_size))
      return False
    if verity_supported and is_verity_partition:
      if 2 * image_size - AdjustPartitionSizeForVerity(image_size) > partition_size:
        print "Error: No more room on %s to fit verity data" % mount_point
        return False
    prop_dict["original_partition_size"] = prop_dict["partition_size"]
    prop_dict["partition_size"] = str(image_size)

  # create the verified image if this is to be verified
  if verity_supported and is_verity_partition:
    if not MakeVerityEnabledImage(out_file, prop_dict):
      return False

  if run_fsck and prop_dict.get("skip_fsck") != "true":
    success, unsparse_image = UnsparseImage(out_file, replace=False)
    if not success:
      return False

    # Run e2fsck on the inflated image file
    e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
    (_, exit_code) = RunCommand(e2fsck_command)

    os.remove(unsparse_image)

  return exit_code == 0

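# For illustration, ImagePropFromGlobalDict(glob_dict, "system") produces a
# dictionary roughly of the form (values here are hypothetical):
#   {"mount_point": "system", "fs_type": "ext4",
#    "partition_size": "1073741824", "verity": "true", ...}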
def ImagePropFromGlobalDict(glob_dict, mount_point):
  """Build an image property dictionary from the global dictionary.

  Args:
    glob_dict: the global dictionary from the build system.
    mount_point: such as "system", "data" etc.
  """
  d = {}
  if "build.prop" in glob_dict:
    bp = glob_dict["build.prop"]
    if "ro.build.date.utc" in bp:
      d["timestamp"] = bp["ro.build.date.utc"]

  def copy_prop(src_p, dest_p):
    if src_p in glob_dict:
      d[dest_p] = str(glob_dict[src_p])

  common_props = (
      "extfs_sparse_flag",
      "mkyaffs2_extra_flags",
      "selinux_fc",
      "skip_fsck",
      "verity",
      "verity_key",
      "verity_signer_cmd"
      )
  for p in common_props:
    copy_prop(p, p)

  d["mount_point"] = mount_point
  if mount_point == "system":
    # Copy the generic system fs type first, override with specific one if
    # available.
    copy_prop("fs_type", "fs_type")
    copy_prop("system_fs_type", "fs_type")
copy_prop("system_size", "partition_size")
|
2014-11-19 03:03:13 +01:00
|
|
|
copy_prop("system_journal_size", "journal_size")
|
2014-07-11 00:42:38 +02:00
|
|
|
copy_prop("system_verity_block_device", "verity_block_device")
|
2015-04-01 20:21:55 +02:00
|
|
|
copy_prop("system_root_image", "system_root_image")
|
|
|
|
copy_prop("ramdisk_dir", "ramdisk_dir")
|
2015-07-19 11:38:53 +02:00
|
|
|
copy_prop("ramdisk_fs_config", "ramdisk_fs_config")
|
2015-06-23 20:16:05 +02:00
|
|
|
copy_prop("has_ext4_reserved_blocks", "has_ext4_reserved_blocks")
|
2011-10-29 02:02:30 +02:00
|
|
|
elif mount_point == "data":
|
2014-06-16 23:17:40 +02:00
|
|
|
# Copy the generic fs type first, override with specific one if available.
|
2011-11-04 19:37:01 +01:00
|
|
|
copy_prop("fs_type", "fs_type")
|
2014-06-16 23:17:40 +02:00
|
|
|
copy_prop("userdata_fs_type", "fs_type")
|
2011-11-04 19:37:01 +01:00
|
|
|
copy_prop("userdata_size", "partition_size")
|
|
|
|
elif mount_point == "cache":
|
|
|
|
copy_prop("cache_fs_type", "fs_type")
|
|
|
|
copy_prop("cache_size", "partition_size")
|
2013-03-20 19:02:05 +01:00
|
|
|
elif mount_point == "vendor":
|
|
|
|
copy_prop("vendor_fs_type", "fs_type")
|
|
|
|
copy_prop("vendor_size", "partition_size")
|
2014-11-19 03:03:13 +01:00
|
|
|
copy_prop("vendor_journal_size", "journal_size")
|
2014-07-11 00:42:38 +02:00
|
|
|
copy_prop("vendor_verity_block_device", "verity_block_device")
|
2015-06-23 20:16:05 +02:00
|
|
|
copy_prop("has_ext4_reserved_blocks", "has_ext4_reserved_blocks")
|
2014-03-12 01:13:27 +01:00
|
|
|
elif mount_point == "oem":
|
|
|
|
copy_prop("fs_type", "fs_type")
|
|
|
|
copy_prop("oem_size", "partition_size")
|
2014-11-19 03:03:13 +01:00
|
|
|
copy_prop("oem_journal_size", "journal_size")
|
2015-06-23 20:16:05 +02:00
|
|
|
copy_prop("has_ext4_reserved_blocks", "has_ext4_reserved_blocks")
|
2011-10-29 02:02:30 +02:00
|
|
|
|
|
|
|
return d
|
|
|
|
|
|
|
|
|
|
|
|
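# The properties file parsed below is a plain "name=value" list. Depending on
# the caller it holds either a ready-to-use image property dictionary
# (including "mount_point") or the global build dictionary that
# ImagePropFromGlobalDict() above filters. A minimal, purely illustrative
# example of the former:
#   mount_point=system
#   fs_type=ext4
#   partition_size=1073741824
#   extfs_sparse_flag=-s
#   selinux_fc=file_contexts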
def LoadGlobalDict(filename):
  """Load "name=value" pairs from filename"""
  d = {}
  f = open(filename)
  for line in f:
    line = line.strip()
    if not line or line.startswith("#"):
      continue
    k, v = line.split("=", 1)
    d[k] = v
  f.close()
  return d

def main(argv):
  if len(argv) != 3:
    print __doc__
    sys.exit(1)

  in_dir = argv[0]
  glob_dict_file = argv[1]
  out_file = argv[2]

  glob_dict = LoadGlobalDict(glob_dict_file)
if "mount_point" in glob_dict:
|
2015-06-23 20:16:05 +02:00
|
|
|
# The caller knows the mount point and provides a dictionay needed by
|
|
|
|
# BuildImage().
|
    image_properties = glob_dict
  else:
    image_filename = os.path.basename(out_file)
    mount_point = ""
    if image_filename == "system.img":
      mount_point = "system"
    elif image_filename == "userdata.img":
      mount_point = "data"
    elif image_filename == "cache.img":
      mount_point = "cache"
    elif image_filename == "vendor.img":
      mount_point = "vendor"
    elif image_filename == "oem.img":
      mount_point = "oem"
    else:
      print >> sys.stderr, "error: unknown image file name ", image_filename
      exit(1)

    image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)

  if not BuildImage(in_dir, image_properties, out_file):
    print >> sys.stderr, "error: failed to build %s from %s" % (out_file,
                                                                in_dir)
    exit(1)

if __name__ == '__main__':
  main(sys.argv[1:])