# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import copy
import errno
import getopt
import getpass
import gzip
import imp
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import zipfile

import blockimgdiff

from hashlib import sha1 as sha1


class Options(object):
  def __init__(self):
    platform_search_path = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }

    self.search_path = platform_search_path.get(sys.platform, None)
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx2048m"]  # The default JVM args.
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8


OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")

# The partitions allowed to be signed by AVB (Android verified boot 2.0).
AVB_PARTITIONS = ('boot', 'recovery', 'system', 'vendor', 'dtbo')

class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h' """

  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008


class ExternalError(RuntimeError):
  pass

def Run(args, verbose=None, **kwargs):
  """Create and return a subprocess.Popen object.

  Caller can specify if the command line should be printed. The global
  OPTIONS.verbose will be used if not specified.
  """
  if verbose is None:
    verbose = OPTIONS.verbose
  if verbose:
    print("running: ", " ".join(args))
  return subprocess.Popen(args, **kwargs)


def CloseInheritedPipes():
  """Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
  before doing other work."""
  if platform.system() != "Darwin":
    return
  for d in range(3, 1025):
    try:
      stat = os.fstat(d)
      if stat is not None:
        pipebit = stat[0] & 0x1000
        if pipebit != 0:
          os.close(d)
    except OSError:
      pass

def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict."""

  def read_helper(fn):
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)

  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    raise ValueError("can't find META/misc_info.txt in input target-files")

  assert "recovery_api_version" in d
  assert "fstab_version" in d

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

    # Redirect {system,vendor}_base_fs_file.
    if "system_base_fs_file" in d:
      basename = os.path.basename(d["system_base_fs_file"])
      system_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(system_base_fs_file):
        d["system_base_fs_file"] = system_base_fs_file
      else:
        print("Warning: failed to find system base fs file: %s" % (
            system_base_fs_file,))
        del d["system_base_fs_file"]

    if "vendor_base_fs_file" in d:
      basename = os.path.basename(d["vendor_base_fs_file"])
      vendor_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(vendor_base_fs_file):
        d["vendor_base_fs_file"] = vendor_base_fs_file
      else:
        print("Warning: failed to find vendor base fs file: %s" % (
            vendor_base_fs_file,))
        del d["vendor_base_fs_file"]

  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  system_root_image = d.get("system_root_image", None) == "true"
  if d.get("no_recovery", None) != "true":
    recovery_fstab_path = "RECOVERY/RAMDISK/etc/recovery.fstab"
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   recovery_fstab_path, system_root_image)
  elif d.get("recovery_as_boot", None) == "true":
    recovery_fstab_path = "BOOT/RAMDISK/etc/recovery.fstab"
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   recovery_fstab_path, system_root_image)
  else:
    d["fstab"] = None

  d["build.prop"] = LoadBuildProp(read_helper, 'SYSTEM/build.prop')
  d["vendor.build.prop"] = LoadBuildProp(read_helper, 'VENDOR/build.prop')
  return d

def LoadBuildProp(read_helper, prop_file):
  try:
    data = read_helper(prop_file)
  except KeyError:
    print("Warning: could not read %s" % (prop_file,))
    data = ""
  return LoadDictionaryFromLines(data.split("\n"))


def LoadDictionaryFromLines(lines):
  d = {}
  for line in lines:
    line = line.strip()
    if not line or line.startswith("#"):
      continue
    if "=" in line:
      name, value = line.split("=", 1)
      d[name] = value
  return d

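# Illustrative example (hypothetical input lines, not from a real build):
# LoadDictionaryFromLines() skips blanks and "#" comments and splits on the
# first "=".
#
#   LoadDictionaryFromLines([
#       "# a comment",
#       "recovery_api_version=3",
#       "blocksize=0x1000",
#   ])
#   # -> {"recovery_api_version": "3", "blocksize": "0x1000"}
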
def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
                      system_root_image=False):
  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, context):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.context = context

  try:
    data = read_helper(recovery_fstab_path)
  except KeyError:
    print("Warning: could not find {}".format(recovery_fstab_path))
    data = ""

  assert fstab_version == 2

  d = {}
  for line in data.split("\n"):
    line = line.strip()
    if not line or line.startswith("#"):
      continue

    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    pieces = line.split()
    if len(pieces) != 5:
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

    # Ignore entries that are managed by vold.
    options = pieces[4]
    if "voldmanaged=" in options:
      continue

    # It's a good line, parse it.
    length = 0
    options = options.split(",")
    for i in options:
      if i.startswith("length="):
        length = int(i[7:])
      else:
        # Ignore all unknown options in the unified fstab.
        continue

    mount_flags = pieces[3]
    # Honor the SELinux context if present.
    context = None
    for i in mount_flags.split(","):
      if i.startswith("context="):
        context = i

    mount_point = pieces[1]
    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                               device=pieces[0], length=length,
                               context=context)

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at /.
  if system_root_image:
    assert not d.has_key("/system") and d.has_key("/")
    d["/system"] = d["/"]
  return d

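# Illustrative example (hypothetical fstab entry, not from a real device): a
# recovery.fstab v2 line such as
#
#   /dev/block/bootdevice/by-name/system  /system  ext4  ro,barrier=1  wait
#
# would be parsed into a Partition with mount_point="/system", fs_type="ext4",
# device="/dev/block/bootdevice/by-name/system", length=0 and context=None.
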
def DumpInfoDict(d):
  for k, v in sorted(d.items()):
    print("%-25s = (%s) %s" % (k, type(v).__name__, v))


def AppendAVBSigningArgs(cmd, partition):
  """Append signing arguments for avbtool."""
  # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
  key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
  algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
  if key_path and algorithm:
    cmd.extend(["--key", key_path, "--algorithm", algorithm])
  avb_salt = OPTIONS.info_dict.get("avb_salt")
  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
  if avb_salt and partition != "vbmeta":
    cmd.extend(["--salt", avb_salt])

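# Illustrative example (hypothetical info_dict values): with
# avb_boot_key_path="path/to/testkey_rsa4096.pem",
# avb_boot_algorithm="SHA256_RSA4096" and avb_salt="00ff",
#
#   cmd = ["avbtool", "add_hash_footer"]
#   AppendAVBSigningArgs(cmd, "boot")
#   # cmd is now ["avbtool", "add_hash_footer",
#   #            "--key", "path/to/testkey_rsa4096.pem",
#   #            "--algorithm", "SHA256_RSA4096", "--salt", "00ff"]
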
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False, two_step_image=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
  we are building a two-step special image (i.e. building a recovery image to
  be loaded into /boot in two-step OTAs).

  Return the image data, or None if sourcedir does not appear to contain files
  for building the requested image.
  """

  def make_ramdisk():
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  # "boot" or "recovery", without extension.
  partition_name = os.path.basename(sourcedir).lower()

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (partition_name,)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    # Hard-code the path as "/boot" for two-step special recovery image (which
    # will be loaded into /boot during the two-step OTA).
    if two_step_image:
      path = "/boot"
    else:
      path = "/" + partition_name
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + partition_name
    img_keyblock = tempfile.NamedTemporaryFile()
    # We have switched from the prebuilt futility binary to using the tool
    # (futility-host) built from the source. Override the setting in the old
    # TF.zip.
    futility = info_dict["futility"]
    if futility.startswith("prebuilts/"):
      futility = "futility-host"
    cmd = [info_dict["vboot_signer_cmd"], futility,
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
  if info_dict.get("avb_enable") == "true":
    avbtool = os.getenv('AVBTOOL') or info_dict["avb_avbtool"]
    part_size = info_dict[partition_name + "_size"]
    cmd = [avbtool, "add_hash_footer", "--image", img.name,
           "--partition_size", str(part_size), "--partition_name",
           partition_name]
    AppendAVBSigningArgs(cmd, partition_name)
    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
    if args and args.strip():
      cmd.extend(shlex.split(args))
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "avbtool add_hash_footer of %s failed" % (
        partition_name,)

  img.seek(os.SEEK_SET, 0)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data

def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None, two_step_image=False):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,))
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print("using prebuilt %s from IMAGES..." % (prebuilt_name,))
    return File.FromLocalFile(name, prebuilt_path)

  print("building image from target_files %s..." % (tree_subdir,))

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
  # for recovery.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk, two_step_image)
  if data:
    return File(name, data)
  return None

def Gunzip(in_filename, out_filename):
  """Gunzip the given gzip compressed file to a given output file.
  """
  with gzip.open(in_filename, "rb") as in_file, open(out_filename, "wb") as out_file:
    shutil.copyfileobj(in_file, out_file)


def UnzipTemp(filename, pattern=None):
  """Unzips the given archive into a temporary directory and returns the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a temp dir,
  then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns:
    (tempdir, zipobj): tempdir is the name of the temporary directory; zipobj
        is a zipfile.ZipFile (of the main file), open for reading.
  """

  def unzip_to_dir(filename, dirname):
    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    if pattern is not None:
      cmd.extend(pattern)
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    if p.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (filename,))

  tmp = MakeTempDir(prefix="targetfiles-")
  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = m.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")

def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password."""

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords

def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK. This can be either a decimal
  number (API Level) or a codename.
  """

  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, err = p.communicate()
  if err:
    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
                        % (p.returncode,))

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt")


def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level). If
  minSdkVersion is set to a codename, it is translated to a number using the
  provided map.
  """

  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    # Not a decimal number. Codename?
    if version in codename_to_api_level_map:
      return codename_to_api_level_map[version]
    else:
      raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                          % (version, codename_to_api_level_map))

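# Illustrative example (hypothetical APK and codename map): if aapt reports
# sdkVersion:'O' for example.apk, then
#
#   GetMinSdkVersionInt("example.apk", {"O": 26, "P": 28})
#   # -> 26
#
# while an APK whose manifest declares minSdkVersion 23 simply returns 23.
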
def SignFile(input_name, output_name, key, password, min_api_level=None,
             codename_to_api_level_map=dict(),
             whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password).

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.
  """

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
         ["-Djava.library.path=" + java_library_path,
          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
         OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  min_sdk_version = min_api_level
  if min_sdk_version is None:
    if not whole_file:
      min_sdk_version = GetMinSdkVersionInt(
          input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))

def CheckSize(data, target, info_dict):
  """Checks the data string passed against the max size limit.

  For non-AVB images, raise exception if the data is too big. Print a warning
  if the data is nearing the maximum size.

  For AVB images, the actual image size should be identical to the limit.

  Args:
    data: A string that contains all the data for the partition.
    target: The partition name. The ".img" suffix is optional.
    info_dict: The dict to be looked up for relevant info.
  """
  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  size = len(data)
  # target could be 'userdata' or 'cache'. They should follow the non-AVB image
  # path.
  if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
    if size != limit:
      raise ExternalError(
          "Mismatching image size for %s: expected %d actual %d" % (
              target, limit, size))
  else:
    pct = float(size) * 100.0 / limit
    msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
    if pct >= 99.0:
      raise ExternalError(msg)
    elif pct >= 95.0:
      print("\n  WARNING: %s\n" % (msg,))
    elif OPTIONS.verbose:
      print("  ", msg)

def ReadApkCerts(tf_zip):
  """Parses the APK certs info from a given target-files zip.

  Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns
  a tuple with the following elements: (1) a dictionary that maps packages to
  certs (based on the "certificate" and "private_key" attributes in the file);
  (2) a string representing the extension of compressed APKs in the target
  files (e.g ".gz", ".bro").

  Args:
    tf_zip: The input target_files ZipFile (already open).

  Returns:
    (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
        the extension string of compressed APKs (e.g. ".gz"), or None if
        there's no compressed APKs.
  """
  certmap = {}
  compressed_extension = None

  # META/apkcerts.txt contains the info for _all_ the packages known at build
  # time. Filter out the ones that are not installed.
  installed_files = set()
  for name in tf_zip.namelist():
    basename = os.path.basename(name)
    if basename:
      installed_files.add(basename)

  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    line = line.strip()
    if not line:
      continue

    m = re.match(
        r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
        r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*)")?$',
        line)
    if not m:
      continue

    matches = m.groupdict()
    cert = matches["CERT"]
    privkey = matches["PRIVKEY"]
    name = matches["NAME"]
    this_compressed_extension = matches["COMPRESSED"]

    public_key_suffix_len = len(OPTIONS.public_key_suffix)
    private_key_suffix_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
      certmap[name] = cert[:-public_key_suffix_len]
    else:
      raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)

    if not this_compressed_extension:
      continue

    # Only count the installed files.
    filename = name + '.' + this_compressed_extension
    if filename not in installed_files:
      continue

    # Make sure that all the values in the compression map have the same
    # extension. We don't support multiple compression methods in the same
    # system image.
    if compressed_extension:
      if this_compressed_extension != compressed_extension:
        raise ValueError(
            "Multiple compressed extensions: {} vs {}".format(
                compressed_extension, this_compressed_extension))
    else:
      compressed_extension = this_compressed_extension

  return (certmap,
          ("." + compressed_extension) if compressed_extension else None)

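# Illustrative example (hypothetical apkcerts.txt line, not from a real build):
#
#   name="Example.apk" certificate="certs/platform.x509.pem" private_key="certs/platform.pk8"
#
# would add {"Example.apk": "certs/platform"} to certmap, and a trailing
# compressed="gz" attribute (for an installed Example.apk.gz) would make the
# returned extension ".gz".
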
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific)  <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""

def Usage(docstring):
  print(docstring.rstrip("\n"))
  print(COMMON_DOCSTRING)


def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler."""

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print("**", str(err), "**")
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = shlex.split(a)
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args

def MakeTempFile(prefix='tmp', suffix=''):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called.  Return the filename."""
  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  os.close(fd)
  OPTIONS.tempfiles.append(fn)
  return fn


def MakeTempDir(prefix='tmp', suffix=''):
  """Makes a temporary dir that will be cleaned up with a call to Cleanup().

  Returns:
    The absolute pathname of the new directory.
  """
  dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
  OPTIONS.tempfiles.append(dir_name)
  return dir_name


def Cleanup():
  for i in OPTIONS.tempfiles:
    if os.path.isdir(i):
      shutil.rmtree(i, ignore_errors=True)
    else:
      os.remove(i)
  del OPTIONS.tempfiles[:]

class PasswordManager(object):
  def __init__(self):
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      for i in missing:
        current[i] = ""

      if not first:
        print("key file %s still missing some passwords." % (self.pwfile,))
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print("failed to parse password file: ", line)
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print("error reading password file: ", str(e))
    return result

def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger than
  # 2GiB. The Python interpreter sometimes rejects strings that large (though
  # it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  saved_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, saved_stat.st_mode)
    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit

def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    if perms is None:
      perms = 0o100644
  else:
    zinfo = zinfo_or_arcname

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it has a priority.
  if perms is not None:
    # If perms doesn't set the file type, mark it as a regular file.
    if perms & 0o770000 == 0:
      perms |= 0o100000
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  zip_file.writestr(zinfo, data)
  zipfile.ZIP64_LIMIT = saved_zip64_limit

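# Illustrative example (hypothetical file names): writing a file and a string
# into an output zip with the helpers above, then closing it via ZipClose() so
# the ZIP64_LIMIT workaround also covers the central directory.
#
#   output_zip = zipfile.ZipFile("out.zip", "w",
#                                compression=zipfile.ZIP_DEFLATED)
#   ZipWrite(output_zip, "payload.bin", arcname="payload.bin")
#   ZipWriteStr(output_zip, "META-INF/com/android/metadata", "ota-type=BLOCK\n")
#   ZipClose(output_zip)
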
def ZipDelete ( zip_filename , entries ) :
""" Deletes entries from a ZIP file.
Since deleting entries from a ZIP file is not supported , it shells out to
' zip -d ' .
Args :
zip_filename : The name of the ZIP file .
entries : The name of the entry , or the list of names to be deleted .
Raises :
AssertionError : In case of non - zero return from ' zip ' .
"""
if isinstance ( entries , basestring ) :
entries = [ entries ]
cmd = [ " zip " , " -d " , zip_filename ] + entries
proc = Run ( cmd , stdout = subprocess . PIPE , stderr = subprocess . STDOUT )
stdoutdata , _ = proc . communicate ( )
assert proc . returncode == 0 , " Failed to delete %s : \n %s " % ( entries ,
stdoutdata )
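
# Illustrative usage of ZipDelete() (not part of the original file); it
# assumes the 'zip' tool is on PATH and that "target.zip" contains the
# listed entries:
#
#   ZipDelete("target.zip", "META-INF/com/android/metadata")
#   ZipDelete("target.zip", ["IMAGES/system.img", "IMAGES/vendor.img"])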


def ZipClose(zip_file):
  # http://b/18015246
  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  # central directory.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  zip_file.close()

  zipfile.ZIP64_LIMIT = saved_zip64_limit


class DeviceSpecificParams(object):
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          info = imp.find_module("releasetools", [path])
        else:
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print("loaded device-specific extensions from", path)
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        print("unable to load device-specific module; assuming none")

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    return self._DoCall("VerifyOTA_Assertions")
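
# Illustrative sketch of a device-specific extensions module (not part of the
# original file). A releasetools.py placed in the directory named by
# OPTIONS.device_specific may define any of the hooks above; undefined hooks
# simply fall back to _DoCall()'s default. The attributes available on 'info'
# (e.g. info.script) depend on what the caller passed to DeviceSpecificParams:
#
#   # releasetools.py (hypothetical device directory)
#   def FullOTA_InstallEnd(info):
#     info.script.Print("Flashing baseband image...")
#
#   def IncrementalOTA_VerifyBegin(info):
#     info.script.Print("Running device-specific pre-verification checks...")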


class File(object):
  def __init__(self, name, data, compress_size=None):
    self.name = name
    self.data = data
    self.size = len(data)
    self.compress_size = compress_size or self.size
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    f = open(diskname, "rb")
    data = f.read()
    f.close()
    return File(name, data)

  def WriteToTemp(self):
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def WriteToDir(self, d):
    with open(os.path.join(d, self.name), "wb") as fp:
      fp.write(self.data)

  def AddToZip(self, z, compression=None):
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
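
# Illustrative usage of File (not part of the original file); reads an image
# from disk, then adds it to an open output zip:
#
#   boot_img = File.FromLocalFile("boot.img", "/tmp/boot.img")
#   print(boot_img.size, boot_img.sha1)
#   boot_img.AddToZip(output_zip, compression=zipfile.ZIP_DEFLATED)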


DIFF_PROGRAM_BY_EXT = {
    ".gz": "imgdiff",
    ".zip": ["imgdiff", "-z"],
    ".jar": ["imgdiff", "-z"],
    ".apk": ["imgdiff", "-z"],
    ".img": "imgdiff",
    }


class Difference(object):
  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []

      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        print("WARNING: diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print("WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err)))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
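
# Illustrative usage of Difference (not part of the original file); computes
# the patch that transforms a source file into a target file, then stores it
# in the output zip. The archive name "patch/..." is only an example:
#
#   src = File.FromLocalFile("system/app/Foo.apk", "old/Foo.apk")
#   tgt = File.FromLocalFile("system/app/Foo.apk", "new/Foo.apk")
#   d = Difference(tgt, src)   # picks "imgdiff -z" for .apk by default
#   tf, sf, patch = d.ComputePatch()
#   if patch is not None:
#     ZipWriteStr(output_zip, "patch/system/app/Foo.apk.p", patch)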


def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  print(len(diffs), "diffs to compute")

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print("patching failed! %s" % (name,))
        else:
          print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
      lock.release()
    except Exception as e:
      print(e)
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
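
# Illustrative usage of ComputeDifferences() (not part of the original file);
# 'pairs' is a hypothetical list of (target File, source File) tuples, and
# OPTIONS.worker_threads must be a positive integer:
#
#   OPTIONS.worker_threads = 4
#   diffs = [Difference(tgt_f, src_f) for tgt_f, src_f in pairs]
#   ComputeDifferences(diffs)   # runs the diff programs in parallel threads
#   for d in diffs:
#     tf, sf, patch = d.GetPatch()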


class BlockDifference(object):
  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    if version is None:
      version = max(
          int(i) for i in
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    assert version >= 3
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version,
                                    disable_imgdiff=self.disable_imgdiff)
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  @property
  def required_cache(self):
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    if OPTIONS.verify:
      self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                           self.device, ranges_str,
                           self.tgt.TotalSha1(include_clobbered_blocks=True),
                           self.device))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                          'block_image_verify("%s", '
                          'package_extract_file("%s.transfer.list"), '
                          '"%s.new.dat", "%s.patch.dat")) then') % (
                              self.device, ranges_str, expected_sha1,
                              self.device, partition, partition, partition))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:
        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def _WritePostInstallVerifyScript(self, script):
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
        self.device, ranges_str,
        self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
          self.device, ranges_str,
          self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))

    # For full OTA, compress the new.dat with brotli at quality 6 to reduce
    # its size. Quality 9 almost triples the compression time but doesn't
    # reduce the size much further.
    # For a typical 1.8G system.new.dat:
    #                       zip  | brotli(quality 6)  | brotli(quality 9)
    #   compressed_size:    942M | 869M (~8% reduced) | 854M
    #   compression_time:   75s  | 265s               | 719s
    #   decompression_time: 15s  | 25s                | 25s
    if not self.src:
      brotli_cmd = ['brotli', '--quality=6',
                    '--output={}.new.dat.br'.format(self.path),
                    '{}.new.dat'.format(self.path)]
      print("Compressing {}.new.dat with brotli".format(self.partition))
      p = Run(brotli_cmd, stdout=subprocess.PIPE)
      p.communicate()
      assert p.returncode == 0, \
          'compression of {}.new.dat failed'.format(self.partition)

      new_data_name = '{}.new.dat.br'.format(self.partition)
      ZipWrite(output_zip,
               '{}.new.dat.br'.format(self.path),
               new_data_name,
               compress_type=zipfile.ZIP_STORED)
    else:
      new_data_name = '{}.new.dat'.format(self.partition)
      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)

    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{new_data_name}", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition,
                new_data_name=new_data_name, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    zero_block = '\x00' * 4096
    ctx = sha1()

    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
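
# Illustrative sketch of how BlockDifference is typically driven (not part of
# the original file). It assumes 'tgt' and 'src' are sparse image objects of
# the kind blockimgdiff.BlockImageDiff accepts, 'script' and 'output_zip' are
# the edify script writer and output zip used elsewhere, and OPTIONS.info_dict,
# OPTIONS.source_info_dict and OPTIONS.verify have been populated:
#
#   block_diff = BlockDifference("system", tgt, src, check_first_block=True)
#   block_diff.WriteVerifyScript(script, touched_blocks_only=True)
#   block_diff.WriteScript(script, output_zip, progress=0.8)
#   print("cache needed:", block_diff.required_cache)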

DataImage = blockimgdiff.DataImage


# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}


def GetTypeAndDevice(mount_point, info):
  fstab = info["fstab"]
  if fstab:
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  else:
    raise KeyError
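
# Illustrative usage of GetTypeAndDevice() (not part of the original file); it
# assumes info_dict["fstab"] has been populated from the target files'
# recovery.fstab:
#
#   part_type, device = GetTypeAndDevice("/system", OPTIONS.info_dict)
#   # e.g. ("EMMC", "/dev/block/bootdevice/by-name/system") on many devices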


def ParseCertificate(data):
  """Parse a PEM-format certificate."""
  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  cert = "".join(cert).decode('base64')
  return cert
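
# Illustrative usage of ParseCertificate() (not part of the original file);
# it extracts the DER bytes between the BEGIN/END CERTIFICATE markers of a
# PEM file:
#
#   with open("testkey.x509.pem") as f:
#     der_bytes = ParseCertificate(f.read())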


def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"

  if full_recovery_image:
    output_sink("etc/recovery.img", recovery_img.data)
  else:
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    return

  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.
  sh_location = "bin/install-recovery.sh"

  print("putting script in", sh_location)

  output_sink(sh_location, sh)
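
# Illustrative usage of MakeRecoveryPatch() (not part of the original file);
# the output_sink callable decides where generated artifacts land, e.g. the
# SYSTEM/ directory of an unpacked target-files tree:
#
#   def output_sink(fn, data):
#     with open(os.path.join(unpacked_dir, "SYSTEM", fn), "wb") as f:
#       f.write(data)
#
#   MakeRecoveryPatch(unpacked_dir, output_sink, recovery_file, boot_file,
#                     info_dict=OPTIONS.info_dict)
#
# where unpacked_dir names an extracted target_files directory and
# recovery_file / boot_file are File objects (hypothetical variable names).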