Merge "Factor out the image classes to break circular dependency"
am: dcb6644719
Change-Id: I16371818e6e022ee75187b9c9e104836a72dd983
This commit is contained in:
commit
bcc3554b74
6 changed files with 240 additions and 217 deletions
|
@ -40,6 +40,7 @@ python_library_host {
|
|||
"common.py",
|
||||
"edify_generator.py",
|
||||
"img_from_target_files.py",
|
||||
"images.py",
|
||||
"make_recovery_patch.py",
|
||||
"merge_target_files.py",
|
||||
"ota_from_target_files.py",
|
||||
|
|
|
@ -28,12 +28,12 @@ import sys
|
|||
import threading
|
||||
import zlib
|
||||
from collections import deque, namedtuple, OrderedDict
|
||||
from hashlib import sha1
|
||||
|
||||
import common
|
||||
from images import EmptyImage
|
||||
from rangelib import RangeSet
|
||||
|
||||
__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
|
||||
__all__ = ["BlockImageDiff"]
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -60,209 +60,6 @@ def compute_patch(srcfile, tgtfile, imgdiff=False):
|
|||
return PatchInfo(imgdiff, f.read())
|
||||
|
||||
|
||||
class Image(object):
|
||||
def RangeSha1(self, ranges):
|
||||
raise NotImplementedError
|
||||
|
||||
def ReadRangeSet(self, ranges):
|
||||
raise NotImplementedError
|
||||
|
||||
def TotalSha1(self, include_clobbered_blocks=False):
|
||||
raise NotImplementedError
|
||||
|
||||
def WriteRangeDataToFd(self, ranges, fd):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class EmptyImage(Image):
|
||||
"""A zero-length image."""
|
||||
|
||||
def __init__(self):
|
||||
self.blocksize = 4096
|
||||
self.care_map = RangeSet()
|
||||
self.clobbered_blocks = RangeSet()
|
||||
self.extended = RangeSet()
|
||||
self.total_blocks = 0
|
||||
self.file_map = {}
|
||||
self.hashtree_info = None
|
||||
|
||||
def RangeSha1(self, ranges):
|
||||
return sha1().hexdigest()
|
||||
|
||||
def ReadRangeSet(self, ranges):
|
||||
return ()
|
||||
|
||||
def TotalSha1(self, include_clobbered_blocks=False):
|
||||
# EmptyImage always carries empty clobbered_blocks, so
|
||||
# include_clobbered_blocks can be ignored.
|
||||
assert self.clobbered_blocks.size() == 0
|
||||
return sha1().hexdigest()
|
||||
|
||||
def WriteRangeDataToFd(self, ranges, fd):
|
||||
raise ValueError("Can't write data from EmptyImage to file")
|
||||
|
||||
|
||||
class DataImage(Image):
|
||||
"""An image wrapped around a single string of data."""
|
||||
|
||||
def __init__(self, data, trim=False, pad=False):
|
||||
self.data = data
|
||||
self.blocksize = 4096
|
||||
|
||||
assert not (trim and pad)
|
||||
|
||||
partial = len(self.data) % self.blocksize
|
||||
padded = False
|
||||
if partial > 0:
|
||||
if trim:
|
||||
self.data = self.data[:-partial]
|
||||
elif pad:
|
||||
self.data += '\0' * (self.blocksize - partial)
|
||||
padded = True
|
||||
else:
|
||||
raise ValueError(("data for DataImage must be multiple of %d bytes "
|
||||
"unless trim or pad is specified") %
|
||||
(self.blocksize,))
|
||||
|
||||
assert len(self.data) % self.blocksize == 0
|
||||
|
||||
self.total_blocks = len(self.data) // self.blocksize
|
||||
self.care_map = RangeSet(data=(0, self.total_blocks))
|
||||
# When the last block is padded, we always write the whole block even for
|
||||
# incremental OTAs. Because otherwise the last block may get skipped if
|
||||
# unchanged for an incremental, but would fail the post-install
|
||||
# verification if it has non-zero contents in the padding bytes.
|
||||
# Bug: 23828506
|
||||
if padded:
|
||||
clobbered_blocks = [self.total_blocks-1, self.total_blocks]
|
||||
else:
|
||||
clobbered_blocks = []
|
||||
self.clobbered_blocks = clobbered_blocks
|
||||
self.extended = RangeSet()
|
||||
|
||||
zero_blocks = []
|
||||
nonzero_blocks = []
|
||||
reference = '\0' * self.blocksize
|
||||
|
||||
for i in range(self.total_blocks-1 if padded else self.total_blocks):
|
||||
d = self.data[i*self.blocksize : (i+1)*self.blocksize]
|
||||
if d == reference:
|
||||
zero_blocks.append(i)
|
||||
zero_blocks.append(i+1)
|
||||
else:
|
||||
nonzero_blocks.append(i)
|
||||
nonzero_blocks.append(i+1)
|
||||
|
||||
assert zero_blocks or nonzero_blocks or clobbered_blocks
|
||||
|
||||
self.file_map = dict()
|
||||
if zero_blocks:
|
||||
self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
|
||||
if nonzero_blocks:
|
||||
self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
|
||||
if clobbered_blocks:
|
||||
self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)
|
||||
|
||||
def _GetRangeData(self, ranges):
|
||||
for s, e in ranges:
|
||||
yield self.data[s*self.blocksize:e*self.blocksize]
|
||||
|
||||
def RangeSha1(self, ranges):
|
||||
h = sha1()
|
||||
for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
|
||||
h.update(data)
|
||||
return h.hexdigest()
|
||||
|
||||
def ReadRangeSet(self, ranges):
|
||||
return list(self._GetRangeData(ranges))
|
||||
|
||||
def TotalSha1(self, include_clobbered_blocks=False):
|
||||
if not include_clobbered_blocks:
|
||||
return self.RangeSha1(self.care_map.subtract(self.clobbered_blocks))
|
||||
return sha1(self.data).hexdigest()
|
||||
|
||||
def WriteRangeDataToFd(self, ranges, fd):
|
||||
for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
|
||||
fd.write(data)
|
||||
|
||||
|
||||
class FileImage(Image):
|
||||
"""An image wrapped around a raw image file."""
|
||||
|
||||
def __init__(self, path, hashtree_info_generator=None):
|
||||
self.path = path
|
||||
self.blocksize = 4096
|
||||
self._file_size = os.path.getsize(self.path)
|
||||
self._file = open(self.path, 'rb')
|
||||
|
||||
if self._file_size % self.blocksize != 0:
|
||||
raise ValueError("Size of file %s must be multiple of %d bytes, but is %d"
|
||||
% self.path, self.blocksize, self._file_size)
|
||||
|
||||
self.total_blocks = self._file_size // self.blocksize
|
||||
self.care_map = RangeSet(data=(0, self.total_blocks))
|
||||
self.clobbered_blocks = RangeSet()
|
||||
self.extended = RangeSet()
|
||||
|
||||
self.generator_lock = threading.Lock()
|
||||
|
||||
self.hashtree_info = None
|
||||
if hashtree_info_generator:
|
||||
self.hashtree_info = hashtree_info_generator.Generate(self)
|
||||
|
||||
zero_blocks = []
|
||||
nonzero_blocks = []
|
||||
reference = '\0' * self.blocksize
|
||||
|
||||
for i in range(self.total_blocks):
|
||||
d = self._file.read(self.blocksize)
|
||||
if d == reference:
|
||||
zero_blocks.append(i)
|
||||
zero_blocks.append(i+1)
|
||||
else:
|
||||
nonzero_blocks.append(i)
|
||||
nonzero_blocks.append(i+1)
|
||||
|
||||
assert zero_blocks or nonzero_blocks
|
||||
|
||||
self.file_map = {}
|
||||
if zero_blocks:
|
||||
self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
|
||||
if nonzero_blocks:
|
||||
self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
|
||||
if self.hashtree_info:
|
||||
self.file_map["__HASHTREE"] = self.hashtree_info.hashtree_range
|
||||
|
||||
def __del__(self):
|
||||
self._file.close()
|
||||
|
||||
def _GetRangeData(self, ranges):
|
||||
# Use a lock to protect the generator so that we will not run two
|
||||
# instances of this generator on the same object simultaneously.
|
||||
with self.generator_lock:
|
||||
for s, e in ranges:
|
||||
self._file.seek(s * self.blocksize)
|
||||
for _ in range(s, e):
|
||||
yield self._file.read(self.blocksize)
|
||||
|
||||
def RangeSha1(self, ranges):
|
||||
h = sha1()
|
||||
for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
|
||||
h.update(data)
|
||||
return h.hexdigest()
|
||||
|
||||
def ReadRangeSet(self, ranges):
|
||||
return list(self._GetRangeData(ranges))
|
||||
|
||||
def TotalSha1(self, include_clobbered_blocks=False):
|
||||
assert not self.clobbered_blocks
|
||||
return self.RangeSha1(self.care_map)
|
||||
|
||||
def WriteRangeDataToFd(self, ranges, fd):
|
||||
for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
|
||||
fd.write(data)
|
||||
|
||||
|
||||
class Transfer(object):
|
||||
def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, tgt_sha1,
|
||||
src_sha1, style, by_id):
|
||||
|
|
|
@ -39,8 +39,9 @@ import time
|
|||
import zipfile
|
||||
from hashlib import sha1, sha256
|
||||
|
||||
import blockimgdiff
|
||||
import images
|
||||
import sparse_img
|
||||
from blockimgdiff import BlockImageDiff
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -915,8 +916,8 @@ def GetNonSparseImage(which, tmpdir, hashtree_info_generator=None):
|
|||
# ota_from_target_files.py (since LMP).
|
||||
assert os.path.exists(path) and os.path.exists(mappath)
|
||||
|
||||
return blockimgdiff.FileImage(path, hashtree_info_generator=
|
||||
hashtree_info_generator)
|
||||
return images.FileImage(path, hashtree_info_generator=hashtree_info_generator)
|
||||
|
||||
|
||||
def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
|
||||
hashtree_info_generator=None):
|
||||
|
@ -1916,9 +1917,9 @@ class BlockDifference(object):
|
|||
assert version >= 3
|
||||
self.version = version
|
||||
|
||||
b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
|
||||
version=self.version,
|
||||
disable_imgdiff=self.disable_imgdiff)
|
||||
b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
|
||||
version=self.version,
|
||||
disable_imgdiff=self.disable_imgdiff)
|
||||
self.path = os.path.join(MakeTempDir(), partition)
|
||||
b.Compute(self.path)
|
||||
self._required_cache = b.max_stashed_size
|
||||
|
@ -2172,8 +2173,10 @@ class BlockDifference(object):
|
|||
return ctx.hexdigest()
|
||||
|
||||
|
||||
DataImage = blockimgdiff.DataImage
|
||||
EmptyImage = blockimgdiff.EmptyImage
|
||||
# Expose these two classes to support vendor-specific scripts
|
||||
DataImage = images.DataImage
|
||||
EmptyImage = images.EmptyImage
|
||||
|
||||
|
||||
# map recovery.fstab's fs_types to mount/format "partition types"
|
||||
PARTITION_TYPES = {
|
||||
|
|
223
tools/releasetools/images.py
Normal file
223
tools/releasetools/images.py
Normal file
|
@ -0,0 +1,223 @@
|
|||
# Copyright (C) 2019 The Android Open Source Project
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
# limitations under the License.
|
||||
|
||||
import os
|
||||
import threading
|
||||
from hashlib import sha1
|
||||
|
||||
from rangelib import RangeSet
|
||||
|
||||
__all__ = ["EmptyImage", "DataImage", "FileImage"]
|
||||
|
||||
|
||||
class Image(object):
  """Abstract interface shared by the image classes below.

  Subclasses provide SHA-1 hashing, range reads, and range writes over
  block ranges of an image.
  """

  def RangeSha1(self, ranges):
    """Returns the hex SHA-1 of the data covered by the given ranges."""
    raise NotImplementedError

  def ReadRangeSet(self, ranges):
    """Returns an iterable of data chunks for the given ranges."""
    raise NotImplementedError

  def TotalSha1(self, include_clobbered_blocks=False):
    """Returns the hex SHA-1 of the whole image."""
    raise NotImplementedError

  def WriteRangeDataToFd(self, ranges, fd):
    """Writes the data covered by the given ranges to the file object fd."""
    raise NotImplementedError
||||
class EmptyImage(Image):
  """An Image containing no blocks at all.

  Every hash query returns the SHA-1 of the empty string; there is no data
  to read, and writing is an error.
  """

  def __init__(self):
    self.blocksize = 4096
    self.total_blocks = 0
    # With zero blocks, every range set is necessarily empty.
    self.care_map = RangeSet()
    self.clobbered_blocks = RangeSet()
    self.extended = RangeSet()
    self.file_map = {}
    self.hashtree_info = None

  def RangeSha1(self, ranges):
    # No data, so any range hashes to the SHA-1 of the empty string.
    return sha1().hexdigest()

  def ReadRangeSet(self, ranges):
    # Nothing to read; return an empty iterable.
    return ()

  def TotalSha1(self, include_clobbered_blocks=False):
    # EmptyImage always carries empty clobbered_blocks, so
    # include_clobbered_blocks can be ignored.
    assert self.clobbered_blocks.size() == 0
    return sha1().hexdigest()

  def WriteRangeDataToFd(self, ranges, fd):
    raise ValueError("Can't write data from EmptyImage to file")
||||
class DataImage(Image):
  """An image wrapped around a single string of data."""

  def __init__(self, data, trim=False, pad=False):
    """Initializes a DataImage.

    Args:
      data: The string holding the image contents.
      trim: If True, drops any partial block at the end of data.
      pad: If True, pads any partial block at the end with '\\0's.

    Raises:
      ValueError: If len(data) is not a whole number of blocks and neither
          trim nor pad is requested.
    """
    self.data = data
    self.blocksize = 4096

    # trim and pad are mutually exclusive ways of handling a partial last
    # block; at most one may be requested.
    assert not (trim and pad)

    partial = len(self.data) % self.blocksize
    padded = False
    if partial > 0:
      if trim:
        self.data = self.data[:-partial]
      elif pad:
        self.data += '\0' * (self.blocksize - partial)
        padded = True
      else:
        raise ValueError(("data for DataImage must be multiple of %d bytes "
                          "unless trim or pad is specified") %
                         (self.blocksize,))

    assert len(self.data) % self.blocksize == 0

    self.total_blocks = len(self.data) // self.blocksize
    self.care_map = RangeSet(data=(0, self.total_blocks))
    # When the last block is padded, we always write the whole block even for
    # incremental OTAs. Because otherwise the last block may get skipped if
    # unchanged for an incremental, but would fail the post-install
    # verification if it has non-zero contents in the padding bytes.
    # Bug: 23828506
    if padded:
      clobbered_blocks = [self.total_blocks-1, self.total_blocks]
    else:
      clobbered_blocks = []
    # Store clobbered_blocks as a RangeSet for consistency with the other
    # Image subclasses: TotalSha1() below subtracts it from care_map, which
    # requires a RangeSet rather than a plain list.
    self.clobbered_blocks = (
        RangeSet(data=clobbered_blocks) if clobbered_blocks else RangeSet())
    self.extended = RangeSet()

    zero_blocks = []
    nonzero_blocks = []
    reference = '\0' * self.blocksize

    # Classify each block (excluding a padded last block) as all-zero or
    # not, recording [start, end) pairs suitable for RangeSet(data=...).
    for i in range(self.total_blocks-1 if padded else self.total_blocks):
      d = self.data[i*self.blocksize : (i+1)*self.blocksize]
      if d == reference:
        zero_blocks.append(i)
        zero_blocks.append(i+1)
      else:
        nonzero_blocks.append(i)
        nonzero_blocks.append(i+1)

    assert zero_blocks or nonzero_blocks or clobbered_blocks

    self.file_map = dict()
    if zero_blocks:
      self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
    if nonzero_blocks:
      self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
    if clobbered_blocks:
      self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)

  def _GetRangeData(self, ranges):
    """Yields the data slice for each [start, end) pair in ranges."""
    for s, e in ranges:
      yield self.data[s*self.blocksize:e*self.blocksize]

  def RangeSha1(self, ranges):
    h = sha1()
    for data in self._GetRangeData(ranges):  # pylint: disable=not-an-iterable
      h.update(data)
    return h.hexdigest()

  def ReadRangeSet(self, ranges):
    return list(self._GetRangeData(ranges))

  def TotalSha1(self, include_clobbered_blocks=False):
    if not include_clobbered_blocks:
      return self.RangeSha1(self.care_map.subtract(self.clobbered_blocks))
    return sha1(self.data).hexdigest()

  def WriteRangeDataToFd(self, ranges, fd):
    for data in self._GetRangeData(ranges):  # pylint: disable=not-an-iterable
      fd.write(data)
||||
class FileImage(Image):
  """An image wrapped around a raw image file."""

  def __init__(self, path, hashtree_info_generator=None):
    """Initializes a FileImage.

    Args:
      path: Path to the raw image file; its size must be a whole number of
          4096-byte blocks.
      hashtree_info_generator: Optional object whose Generate() returns the
          hashtree info for this image.

    Raises:
      ValueError: If the file size is not a multiple of the block size.
    """
    self.path = path
    self.blocksize = 4096
    self._file_size = os.path.getsize(self.path)
    self._file = open(self.path, 'rb')

    if self._file_size % self.blocksize != 0:
      # The format arguments must be grouped in a tuple; applying "%" to
      # self.path alone raises a TypeError instead of the intended message.
      raise ValueError("Size of file %s must be multiple of %d bytes, but is %d"
                       % (self.path, self.blocksize, self._file_size))

    self.total_blocks = self._file_size // self.blocksize
    self.care_map = RangeSet(data=(0, self.total_blocks))
    self.clobbered_blocks = RangeSet()
    self.extended = RangeSet()

    # Protects _GetRangeData(), which seeks/reads the shared file handle.
    self.generator_lock = threading.Lock()

    self.hashtree_info = None
    if hashtree_info_generator:
      self.hashtree_info = hashtree_info_generator.Generate(self)

    zero_blocks = []
    nonzero_blocks = []
    reference = '\0' * self.blocksize

    # Classify each block as all-zero or not, recording [start, end) pairs
    # suitable for RangeSet(data=...).
    for i in range(self.total_blocks):
      d = self._file.read(self.blocksize)
      if d == reference:
        zero_blocks.append(i)
        zero_blocks.append(i+1)
      else:
        nonzero_blocks.append(i)
        nonzero_blocks.append(i+1)

    assert zero_blocks or nonzero_blocks

    self.file_map = {}
    if zero_blocks:
      self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
    if nonzero_blocks:
      self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
    if self.hashtree_info:
      self.file_map["__HASHTREE"] = self.hashtree_info.hashtree_range

  def __del__(self):
    # Best-effort close of the underlying file handle.
    self._file.close()

  def _GetRangeData(self, ranges):
    """Yields one blocksize-chunk at a time for each [start, end) range."""
    # Use a lock to protect the generator so that we will not run two
    # instances of this generator on the same object simultaneously.
    with self.generator_lock:
      for s, e in ranges:
        self._file.seek(s * self.blocksize)
        for _ in range(s, e):
          yield self._file.read(self.blocksize)

  def RangeSha1(self, ranges):
    h = sha1()
    for data in self._GetRangeData(ranges):  # pylint: disable=not-an-iterable
      h.update(data)
    return h.hexdigest()

  def ReadRangeSet(self, ranges):
    return list(self._GetRangeData(ranges))

  def TotalSha1(self, include_clobbered_blocks=False):
    # FileImage carries no clobbered blocks, so the flag can be ignored.
    assert not self.clobbered_blocks
    return self.RangeSha1(self.care_map)

  def WriteRangeDataToFd(self, ranges, fd):
    for data in self._GetRangeData(ranges):  # pylint: disable=not-an-iterable
      fd.write(data)
|
@ -18,9 +18,8 @@ import os
|
|||
from hashlib import sha1
|
||||
|
||||
import common
|
||||
from blockimgdiff import (
|
||||
BlockImageDiff, DataImage, EmptyImage, FileImage, HeapItem, ImgdiffStats,
|
||||
Transfer)
|
||||
from blockimgdiff import BlockImageDiff, HeapItem, ImgdiffStats, Transfer
|
||||
from images import DataImage, EmptyImage, FileImage
|
||||
from rangelib import RangeSet
|
||||
from test_utils import ReleaseToolsTestCase
|
||||
|
||||
|
|
|
@ -25,9 +25,9 @@ from hashlib import sha1
|
|||
import common
|
||||
import test_utils
|
||||
import validate_target_files
|
||||
from images import EmptyImage, DataImage
|
||||
from rangelib import RangeSet
|
||||
|
||||
from blockimgdiff import EmptyImage, DataImage
|
||||
|
||||
KiB = 1024
|
||||
MiB = 1024 * KiB
|
||||
|
|
Loading…
Reference in a new issue