2014-09-02 17:53:09 +02:00
|
|
|
# Copyright (C) 2014 The Android Open Source Project
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
2014-08-26 22:10:25 +02:00
|
|
|
from __future__ import print_function
|
|
|
|
|
|
|
|
from collections import deque, OrderedDict
|
|
|
|
from hashlib import sha1
|
2015-05-21 23:09:49 +02:00
|
|
|
import common
|
2014-09-08 17:29:55 +02:00
|
|
|
import heapq
|
2014-08-26 22:10:25 +02:00
|
|
|
import itertools
|
|
|
|
import multiprocessing
|
|
|
|
import os
|
|
|
|
import re
|
|
|
|
import subprocess
|
|
|
|
import threading
|
|
|
|
import tempfile
|
|
|
|
|
2015-03-24 03:13:21 +01:00
|
|
|
from rangelib import RangeSet
|
|
|
|
|
2014-08-26 22:10:25 +02:00
|
|
|
|
2014-08-26 19:40:28 +02:00
|
|
|
__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
|
|
|
|
|
2015-03-24 03:13:21 +01:00
|
|
|
|
2014-08-26 22:10:25 +02:00
|
|
|
def compute_patch(src, tgt, imgdiff=False):
  """Return the patch bytes that transform src into tgt.

  Args:
    src: an iterable of byte strings comprising the source data.
    tgt: an iterable of byte strings comprising the target data.
    imgdiff: if True, run "imgdiff -z" (better for zip-style data such as
        APKs/JARs); otherwise run "bsdiff".

  Returns:
    The contents of the generated patch file.

  Raises:
    ValueError: if the external diff tool exits with a nonzero status.
  """
  srcfd, srcfile = tempfile.mkstemp(prefix="src-")
  tgtfd, tgtfile = tempfile.mkstemp(prefix="tgt-")
  patchfd, patchfile = tempfile.mkstemp(prefix="patch-")
  # We only need the patch file's name; the diff tool creates its contents.
  os.close(patchfd)

  try:
    with os.fdopen(srcfd, "wb") as f_src:
      for p in src:
        f_src.write(p)

    with os.fdopen(tgtfd, "wb") as f_tgt:
      for p in tgt:
        f_tgt.write(p)

    # Remove the placeholder so the diff tool can create the file itself.
    try:
      os.unlink(patchfile)
    except OSError:
      pass

    if imgdiff:
      # Use a context manager for the /dev/null sink; previously the file
      # object returned by open() was never closed, leaking a descriptor
      # per call.
      with open("/dev/null", "a") as devnull:
        p = subprocess.call(["imgdiff", "-z", srcfile, tgtfile, patchfile],
                            stdout=devnull,
                            stderr=subprocess.STDOUT)
    else:
      p = subprocess.call(["bsdiff", srcfile, tgtfile, patchfile])

    if p:
      raise ValueError("diff failed: " + str(p))

    with open(patchfile, "rb") as f:
      return f.read()
  finally:
    # Best-effort cleanup of all three temp files.
    try:
      os.unlink(srcfile)
      os.unlink(tgtfile)
      os.unlink(patchfile)
    except OSError:
      pass
|
|
|
|
|
2015-03-24 03:13:21 +01:00
|
|
|
|
|
|
|
class Image(object):
  """Abstract interface for the image objects BlockImageDiff works on.

  Concrete implementations (e.g. EmptyImage, DataImage) must provide the
  attributes described in the module comment (blocksize, total_blocks,
  care_map, clobbered_blocks, extended, file_map) plus the methods below.
  """

  def ReadRangeSet(self, ranges):
    # Return the data contained in the image blocks of the given RangeSet,
    # as a list/tuple of strings whose concatenation is the block data.
    raise NotImplementedError

  def TotalSha1(self, include_clobbered_blocks=False):
    # Return (as a hex string) the SHA-1 of all data in the care map,
    # excluding clobbered blocks unless include_clobbered_blocks is True.
    raise NotImplementedError
|
|
|
|
|
|
|
|
|
|
|
|
class EmptyImage(Image):
  """A zero-length image."""

  blocksize = 4096
  # No blocks carry data in an empty image.
  care_map = RangeSet()
  # Nothing can be clobbered by the filesystem either.
  clobbered_blocks = RangeSet()
  extended = RangeSet()
  total_blocks = 0
  file_map = {}

  def ReadRangeSet(self, ranges):
    # There is no data to read; return an empty sequence of pieces.
    return ()

  def TotalSha1(self, include_clobbered_blocks=False):
    # EmptyImage always carries empty clobbered_blocks, so
    # include_clobbered_blocks can be ignored.
    assert self.clobbered_blocks.size() == 0
    # SHA-1 of zero bytes of data.
    return sha1().hexdigest()
|
|
|
|
|
|
|
|
|
2015-03-24 03:13:21 +01:00
|
|
|
class DataImage(Image):
  """An image wrapped around a single string of data."""

  def __init__(self, data, trim=False, pad=False):
    """Wrap *data* as an image.

    Args:
      data: the raw image contents as a single byte string.
      trim: if True, drop a trailing partial block.
      pad: if True, zero-pad a trailing partial block to a full block.

    Raises:
      ValueError: if data is not a multiple of the block size and neither
          trim nor pad was requested.
    """
    self.data = data
    self.blocksize = 4096

    # trim and pad are mutually exclusive ways to fix a partial last block.
    assert not (trim and pad)

    partial = len(self.data) % self.blocksize
    padded = False
    if partial > 0:
      if trim:
        self.data = self.data[:-partial]
      elif pad:
        self.data += '\0' * (self.blocksize - partial)
        padded = True
      else:
        raise ValueError(("data for DataImage must be multiple of %d bytes "
                          "unless trim or pad is specified") %
                         (self.blocksize,))

    assert len(self.data) % self.blocksize == 0

    # Use floor division so total_blocks stays an int under Python 3 as
    # well ("/" would yield a float there, breaking RangeSet construction).
    self.total_blocks = len(self.data) // self.blocksize
    self.care_map = RangeSet(data=(0, self.total_blocks))
    # When the last block is padded, we always write the whole block even for
    # incremental OTAs. Because otherwise the last block may get skipped if
    # unchanged for an incremental, but would fail the post-install
    # verification if it has non-zero contents in the padding bytes.
    # Bug: 23828506
    if padded:
      clobbered_blocks = [self.total_blocks-1, self.total_blocks]
    else:
      clobbered_blocks = []
    self.clobbered_blocks = clobbered_blocks
    self.extended = RangeSet()

    # Classify every (non-padded) block as all-zero or not, building the
    # flattened [start, end, start, end, ...] lists RangeSet expects.
    zero_blocks = []
    nonzero_blocks = []
    reference = '\0' * self.blocksize

    for i in range(self.total_blocks-1 if padded else self.total_blocks):
      d = self.data[i*self.blocksize : (i+1)*self.blocksize]
      if d == reference:
        zero_blocks.append(i)
        zero_blocks.append(i+1)
      else:
        nonzero_blocks.append(i)
        nonzero_blocks.append(i+1)

    assert zero_blocks or nonzero_blocks or clobbered_blocks

    self.file_map = dict()
    if zero_blocks:
      self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
    if nonzero_blocks:
      self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
    if clobbered_blocks:
      self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)

  def ReadRangeSet(self, ranges):
    """Return the data pieces for each (start, end) block range in *ranges*."""
    return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges]

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the hex SHA-1 of the image data.

    Excludes clobbered blocks unless include_clobbered_blocks is True.
    """
    if not include_clobbered_blocks:
      ranges = self.care_map.subtract(self.clobbered_blocks)
      # ReadRangeSet() returns a list of pieces; sha1() needs a single
      # string, so join them first (passing the list raised TypeError).
      return sha1(''.join(self.ReadRangeSet(ranges))).hexdigest()
    else:
      return sha1(self.data).hexdigest()
|
2014-08-26 19:40:28 +02:00
|
|
|
|
2014-08-26 22:10:25 +02:00
|
|
|
|
|
|
|
class Transfer(object):
  """One transfer command: write tgt_ranges using data from src_ranges.

  Each instance registers itself in *by_id* and takes its index there as
  its id. Dependency edges (goes_before/goes_after) and stash bookkeeping
  (stash_before/use_stash) are filled in later by BlockImageDiff.
  """

  def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, style, by_id):
    self.tgt_name = tgt_name
    self.src_name = src_name
    self.tgt_ranges = tgt_ranges
    self.src_ranges = src_ranges
    self.style = style

    # "intact" means both range sets store their blocks in increasing
    # order, which is what imgdiff needs to treat them as whole files.
    tgt_monotonic = getattr(tgt_ranges, "monotonic", False)
    src_monotonic = getattr(src_ranges, "monotonic", False)
    self.intact = tgt_monotonic and src_monotonic

    # We use OrderedDict rather than dict so that the output is repeatable;
    # otherwise it would depend on the hash values of the Transfer objects.
    self.goes_before = OrderedDict()
    self.goes_after = OrderedDict()

    self.stash_before = []
    self.use_stash = []

    self.id = len(by_id)
    by_id.append(self)

  def NetStashChange(self):
    """Return blocks stashed minus blocks freed when this command runs."""
    stored = sum(sr.size() for (_, sr) in self.stash_before)
    freed = sum(sr.size() for (_, sr) in self.use_stash)
    return stored - freed

  def ConvertToNew(self):
    """Downgrade this command to a plain "new" (no source data needed)."""
    assert self.style != "new"
    self.use_stash = []
    self.style = "new"
    self.src_ranges = RangeSet()

  def __str__(self):
    return "%d: <%s %s to %s>" % (
        self.id, self.src_ranges, self.style, self.tgt_ranges)
|
|
|
|
|
|
|
|
|
|
|
|
# BlockImageDiff works on two image objects. An image object is
|
|
|
|
# anything that provides the following attributes:
|
|
|
|
#
|
|
|
|
# blocksize: the size in bytes of a block, currently must be 4096.
|
|
|
|
#
|
|
|
|
# total_blocks: the total size of the partition/image, in blocks.
|
|
|
|
#
|
|
|
|
# care_map: a RangeSet containing which blocks (in the range [0,
|
|
|
|
# total_blocks) we actually care about; i.e. which blocks contain
|
|
|
|
# data.
|
|
|
|
#
|
|
|
|
# file_map: a dict that partitions the blocks contained in care_map
|
|
|
|
# into smaller domains that are useful for doing diffs on.
|
|
|
|
# (Typically a domain is a file, and the key in file_map is the
|
|
|
|
# pathname.)
|
|
|
|
#
|
2015-05-12 20:42:31 +02:00
|
|
|
# clobbered_blocks: a RangeSet containing which blocks contain data
|
|
|
|
# but may be altered by the FS. They need to be excluded when
|
|
|
|
# verifying the partition integrity.
|
|
|
|
#
|
2014-08-26 22:10:25 +02:00
|
|
|
# ReadRangeSet(): a function that takes a RangeSet and returns the
|
|
|
|
# data contained in the image blocks of that RangeSet. The data
|
|
|
|
# is returned as a list or tuple of strings; concatenating the
|
|
|
|
# elements together should produce the requested data.
|
|
|
|
# Implementations are free to break up the data into list/tuple
|
|
|
|
# elements in any way that is convenient.
|
|
|
|
#
|
2014-08-26 19:40:28 +02:00
|
|
|
# TotalSha1(): a function that returns (as a hex string) the SHA-1
|
|
|
|
# hash of all the data in the image (ie, all the blocks in the
|
2015-06-01 22:40:49 +02:00
|
|
|
# care_map minus clobbered_blocks, or including the clobbered
|
|
|
|
# blocks if include_clobbered_blocks is True).
|
2014-08-26 19:40:28 +02:00
|
|
|
#
|
2014-08-26 22:10:25 +02:00
|
|
|
# When creating a BlockImageDiff, the src image may be None, in which
|
|
|
|
# case the list of transfers produced will never read from the
|
|
|
|
# original image.
|
|
|
|
|
|
|
|
class BlockImageDiff(object):
|
2014-12-09 17:40:34 +01:00
|
|
|
def __init__(self, tgt, src=None, threads=None, version=3):
|
2014-08-26 22:10:25 +02:00
|
|
|
if threads is None:
|
|
|
|
threads = multiprocessing.cpu_count() // 2
|
2015-03-24 03:13:21 +01:00
|
|
|
if threads == 0:
|
|
|
|
threads = 1
|
2014-08-26 22:10:25 +02:00
|
|
|
self.threads = threads
|
2014-09-08 17:29:55 +02:00
|
|
|
self.version = version
|
2015-03-24 03:13:21 +01:00
|
|
|
self.transfers = []
|
|
|
|
self.src_basenames = {}
|
|
|
|
self.src_numpatterns = {}
|
2014-09-08 17:29:55 +02:00
|
|
|
|
2014-12-09 17:40:34 +01:00
|
|
|
assert version in (1, 2, 3)
|
2014-08-26 22:10:25 +02:00
|
|
|
|
|
|
|
self.tgt = tgt
|
|
|
|
if src is None:
|
|
|
|
src = EmptyImage()
|
|
|
|
self.src = src
|
|
|
|
|
|
|
|
# The updater code that installs the patch always uses 4k blocks.
|
|
|
|
assert tgt.blocksize == 4096
|
|
|
|
assert src.blocksize == 4096
|
|
|
|
|
|
|
|
# The range sets in each filemap should comprise a partition of
|
|
|
|
# the care map.
|
|
|
|
self.AssertPartition(src.care_map, src.file_map.values())
|
|
|
|
self.AssertPartition(tgt.care_map, tgt.file_map.values())
|
|
|
|
|
|
|
|
  def Compute(self, prefix):
    """Run the whole diff pipeline and write the output files.

    Produces prefix + ".transfer.list", ".new.dat" and ".patch.dat" via
    ComputePatches()/WriteTransfers() after ordering the transfers.
    """
    # When looking for a source file to use as the diff input for a
    # target file, we try:
    #   1) an exact path match if available, otherwise
    #   2) an exact basename match if available, otherwise
    #   3) a basename match after all runs of digits are replaced by
    #      "#" if available, otherwise
    #   4) we have no source for this target.
    self.AbbreviateSourceNames()
    self.FindTransfers()

    # Find the ordering dependencies among transfers (this is O(n^2)
    # in the number of transfers).
    self.GenerateDigraph()
    # Find a sequence of transfers that satisfies as many ordering
    # dependencies as possible (heuristically).
    self.FindVertexSequence()
    # Fix up the ordering dependencies that the sequence didn't
    # satisfy.
    if self.version == 1:
      # v1 has no stash support, so backward edges must be broken by
      # dropping source data (converting commands to "new").
      self.RemoveBackwardEdges()
    else:
      # v2+ can stash, so backward edges are reversed instead, then the
      # order is re-optimized to reduce peak stash usage.
      self.ReverseBackwardEdges()
      self.ImproveVertexSequence()

    # Ensure the runtime stash size is under the limit.
    if self.version >= 2 and common.OPTIONS.cache_size is not None:
      self.ReviseStashSize()

    # Double-check our work.
    self.AssertSequenceGood()

    self.ComputePatches(prefix)
    self.WriteTransfers(prefix)
|
|
|
|
|
2015-03-24 03:13:21 +01:00
|
|
|
def HashBlocks(self, source, ranges): # pylint: disable=no-self-use
|
2014-12-09 17:40:34 +01:00
|
|
|
data = source.ReadRangeSet(ranges)
|
|
|
|
ctx = sha1()
|
|
|
|
|
|
|
|
for p in data:
|
|
|
|
ctx.update(p)
|
|
|
|
|
|
|
|
return ctx.hexdigest()
|
|
|
|
|
2014-08-26 22:10:25 +02:00
|
|
|
  def WriteTransfers(self, prefix):
    """Emit the transfer list for this diff to prefix + ".transfer.list".

    Walks self.transfers in order, rendering each command in the format
    of self.version, while tracking stash usage (slot ids for v2, content
    hashes for v3) so the header can report the peak stash requirement.
    """
    out = []

    # Running count of target blocks actually written (for the header).
    total = 0

    # v2: maps stash key -> slot id.  v3: also maps content hash ->
    # outstanding use count, so duplicate stashes are emitted only once.
    stashes = {}
    stashed_blocks = 0
    max_stashed_blocks = 0

    # v2 stash slots are recycled through a min-heap of freed ids.
    free_stash_ids = []
    next_stash_id = 0

    for xf in self.transfers:

      if self.version < 2:
        # v1 has no stash support at all.
        assert not xf.stash_before
        assert not xf.use_stash

      # Emit the stash commands this transfer requires before it runs.
      for s, sr in xf.stash_before:
        assert s not in stashes
        if free_stash_ids:
          sid = heapq.heappop(free_stash_ids)
        else:
          sid = next_stash_id
          next_stash_id += 1
        stashes[s] = sid
        stashed_blocks += sr.size()
        if self.version == 2:
          out.append("stash %d %s\n" % (sid, sr.to_string_raw()))
        else:
          # v3 addresses stashes by content hash; reference-count
          # duplicates instead of stashing the same data twice.
          sh = self.HashBlocks(self.src, sr)
          if sh in stashes:
            stashes[sh] += 1
          else:
            stashes[sh] = 1
            out.append("stash %s %s\n" % (sh, sr.to_string_raw()))

      if stashed_blocks > max_stashed_blocks:
        max_stashed_blocks = stashed_blocks

      # "free" commands generated by this transfer; appended after the
      # main command so the stash is released immediately after use.
      free_string = []

      if self.version == 1:
        src_str = xf.src_ranges.to_string_raw()
      elif self.version >= 2:

        # <# blocks> <src ranges>
        #   OR
        # <# blocks> <src ranges> <src locs> <stash refs...>
        #   OR
        # <# blocks> - <stash refs...>

        size = xf.src_ranges.size()
        src_str = [str(size)]

        unstashed_src_ranges = xf.src_ranges
        mapped_stashes = []
        for s, sr in xf.use_stash:
          sid = stashes.pop(s)
          stashed_blocks -= sr.size()
          unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
          sh = self.HashBlocks(self.src, sr)
          # Express the stashed range in coordinates relative to the
          # concatenated source data of this transfer.
          sr = xf.src_ranges.map_within(sr)
          mapped_stashes.append(sr)
          if self.version == 2:
            src_str.append("%d:%s" % (sid, sr.to_string_raw()))
            # A stash will be used only once. We need to free the stash
            # immediately after the use, instead of waiting for the automatic
            # clean-up at the end. Because otherwise it may take up extra space
            # and lead to OTA failures.
            # Bug: 23119955
            free_string.append("free %d\n" % (sid,))
          else:
            assert sh in stashes
            src_str.append("%s:%s" % (sh, sr.to_string_raw()))
            stashes[sh] -= 1
            if stashes[sh] == 0:
              free_string.append("free %s\n" % (sh))
              stashes.pop(sh)
          heapq.heappush(free_stash_ids, sid)

        if unstashed_src_ranges:
          src_str.insert(1, unstashed_src_ranges.to_string_raw())
          if xf.use_stash:
            mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
            src_str.insert(2, mapped_unstashed.to_string_raw())
            mapped_stashes.append(mapped_unstashed)
            # The stashed + unstashed pieces must exactly tile the source.
            self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
        else:
          src_str.insert(1, "-")
          self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)

        src_str = " ".join(src_str)

      # all versions:
      #   zero <rangeset>
      #   new <rangeset>
      #   erase <rangeset>
      #
      # version 1:
      #   bsdiff patchstart patchlen <src rangeset> <tgt rangeset>
      #   imgdiff patchstart patchlen <src rangeset> <tgt rangeset>
      #   move <src rangeset> <tgt rangeset>
      #
      # version 2:
      #   bsdiff patchstart patchlen <tgt rangeset> <src_str>
      #   imgdiff patchstart patchlen <tgt rangeset> <src_str>
      #   move <tgt rangeset> <src_str>
      #
      # version 3:
      #   bsdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
      #   imgdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
      #   move hash <tgt rangeset> <src_str>

      tgt_size = xf.tgt_ranges.size()

      if xf.style == "new":
        assert xf.tgt_ranges
        out.append("%s %s\n" % (xf.style, xf.tgt_ranges.to_string_raw()))
        total += tgt_size
      elif xf.style == "move":
        assert xf.tgt_ranges
        assert xf.src_ranges.size() == tgt_size
        # A move whose source equals its target is a no-op; emit nothing.
        if xf.src_ranges != xf.tgt_ranges:
          if self.version == 1:
            out.append("%s %s %s\n" % (
                xf.style,
                xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
          elif self.version == 2:
            out.append("%s %s %s\n" % (
                xf.style,
                xf.tgt_ranges.to_string_raw(), src_str))
          elif self.version >= 3:
            # take into account automatic stashing of overlapping blocks
            if xf.src_ranges.overlaps(xf.tgt_ranges):
              temp_stash_usage = stashed_blocks + xf.src_ranges.size()
              if temp_stash_usage > max_stashed_blocks:
                max_stashed_blocks = temp_stash_usage

            out.append("%s %s %s %s\n" % (
                xf.style,
                self.HashBlocks(self.tgt, xf.tgt_ranges),
                xf.tgt_ranges.to_string_raw(), src_str))
          total += tgt_size
      elif xf.style in ("bsdiff", "imgdiff"):
        assert xf.tgt_ranges
        assert xf.src_ranges
        if self.version == 1:
          out.append("%s %d %d %s %s\n" % (
              xf.style, xf.patch_start, xf.patch_len,
              xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
        elif self.version == 2:
          out.append("%s %d %d %s %s\n" % (
              xf.style, xf.patch_start, xf.patch_len,
              xf.tgt_ranges.to_string_raw(), src_str))
        elif self.version >= 3:
          # take into account automatic stashing of overlapping blocks
          if xf.src_ranges.overlaps(xf.tgt_ranges):
            temp_stash_usage = stashed_blocks + xf.src_ranges.size()
            if temp_stash_usage > max_stashed_blocks:
              max_stashed_blocks = temp_stash_usage

          out.append("%s %d %d %s %s %s %s\n" % (
              xf.style,
              xf.patch_start, xf.patch_len,
              self.HashBlocks(self.src, xf.src_ranges),
              self.HashBlocks(self.tgt, xf.tgt_ranges),
              xf.tgt_ranges.to_string_raw(), src_str))
        total += tgt_size
      elif xf.style == "zero":
        assert xf.tgt_ranges
        # Blocks already zero in the source need not be written again.
        to_zero = xf.tgt_ranges.subtract(xf.src_ranges)
        if to_zero:
          out.append("%s %s\n" % (xf.style, to_zero.to_string_raw()))
          total += to_zero.size()
      else:
        raise ValueError("unknown transfer style '%s'\n" % xf.style)

      if free_string:
        out.append("".join(free_string))

      if self.version >= 2 and common.OPTIONS.cache_size is not None:
        # Sanity check: abort if we're going to need more stash space than
        # the allowed size (cache_size * threshold). There are two purposes
        # of having a threshold here. a) Part of the cache may have been
        # occupied by some recovery logs. b) It will buy us some time to deal
        # with the oversize issue.
        cache_size = common.OPTIONS.cache_size
        stash_threshold = common.OPTIONS.stash_threshold
        max_allowed = cache_size * stash_threshold
        assert max_stashed_blocks * self.tgt.blocksize < max_allowed, \
            'Stash size %d (%d * %d) exceeds the limit %d (%d * %.2f)' % (
                max_stashed_blocks * self.tgt.blocksize, max_stashed_blocks,
                self.tgt.blocksize, max_allowed, cache_size,
                stash_threshold)

    # Zero out extended blocks as a workaround for bug 20881595.
    if self.tgt.extended:
      out.append("zero %s\n" % (self.tgt.extended.to_string_raw(),))
      total += self.tgt.extended.size()

    # We erase all the blocks on the partition that a) don't contain useful
    # data in the new image and b) will not be touched by dm-verity.
    all_tgt = RangeSet(data=(0, self.tgt.total_blocks))
    all_tgt_minus_extended = all_tgt.subtract(self.tgt.extended)
    new_dontcare = all_tgt_minus_extended.subtract(self.tgt.care_map)
    if new_dontcare:
      out.append("erase %s\n" % (new_dontcare.to_string_raw(),))

    out.insert(0, "%d\n" % (self.version,))  # format version number
    out.insert(1, "%d\n" % (total,))
    if self.version >= 2:
      # version 2 only: after the total block count, we give the number
      # of stash slots needed, and the maximum size needed (in blocks)
      out.insert(2, str(next_stash_id) + "\n")
      out.insert(3, str(max_stashed_blocks) + "\n")

    with open(prefix + ".transfer.list", "wb") as f:
      for i in out:
        f.write(i)

    if self.version >= 2:
      max_stashed_size = max_stashed_blocks * self.tgt.blocksize
      OPTIONS = common.OPTIONS
      if OPTIONS.cache_size is not None:
        max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
        print("max stashed blocks: %d (%d bytes), "
              "limit: %d bytes (%.2f%%)\n" % (
            max_stashed_blocks, max_stashed_size, max_allowed,
            max_stashed_size * 100.0 / max_allowed))
      else:
        print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % (
            max_stashed_blocks, max_stashed_size))
|
2014-09-08 17:29:55 +02:00
|
|
|
|
2015-08-17 18:45:13 +02:00
|
|
|
  def ReviseStashSize(self):
    """Keep runtime stash usage under the /cache budget.

    Simulates stash usage over the transfer sequence; any command whose
    stash requirement would exceed the budget is converted to "new"
    (losing the diff but guaranteeing the OTA fits in the cache).
    """
    print("Revising stash size...")
    stashes = {}

    # Create the map between a stash and its def/use points. For example, for a
    # given stash of (idx, sr), stashes[idx] = (sr, def_cmd, use_cmd).
    for xf in self.transfers:
      # Command xf defines (stores) all the stashes in stash_before.
      for idx, sr in xf.stash_before:
        stashes[idx] = (sr, xf)

      # Record all the stashes command xf uses.
      for idx, _ in xf.use_stash:
        stashes[idx] += (xf,)

    # Compute the maximum blocks available for stash based on /cache size and
    # the threshold.
    cache_size = common.OPTIONS.cache_size
    stash_threshold = common.OPTIONS.stash_threshold
    max_allowed = cache_size * stash_threshold / self.tgt.blocksize

    stashed_blocks = 0
    # Blocks that end up shipped as raw "new" data because of this pass.
    new_blocks = 0

    # Now go through all the commands. Compute the required stash size on the
    # fly. If a command requires excess stash than available, it deletes the
    # stash by replacing the command that uses the stash with a "new" command
    # instead.
    for xf in self.transfers:
      replaced_cmds = []

      # xf.stash_before generates explicit stash commands.
      for idx, sr in xf.stash_before:
        if stashed_blocks + sr.size() > max_allowed:
          # We cannot stash this one for a later command. Find out the command
          # that will use this stash and replace the command with "new".
          use_cmd = stashes[idx][2]
          replaced_cmds.append(use_cmd)
          print("%10d %9s %s" % (sr.size(), "explicit", use_cmd))
        else:
          stashed_blocks += sr.size()

      # xf.use_stash generates free commands.
      for _, sr in xf.use_stash:
        stashed_blocks -= sr.size()

      # "move" and "diff" may introduce implicit stashes in BBOTA v3. Prior to
      # ComputePatches(), they both have the style of "diff".
      if xf.style == "diff" and self.version >= 3:
        assert xf.tgt_ranges and xf.src_ranges
        if xf.src_ranges.overlaps(xf.tgt_ranges):
          if stashed_blocks + xf.src_ranges.size() > max_allowed:
            replaced_cmds.append(xf)
            print("%10d %9s %s" % (xf.src_ranges.size(), "implicit", xf))

      # Replace the commands in replaced_cmds with "new"s.
      for cmd in replaced_cmds:
        # It no longer uses any commands in "use_stash". Remove the def points
        # for all those stashes.
        for idx, sr in cmd.use_stash:
          def_cmd = stashes[idx][1]
          assert (idx, sr) in def_cmd.stash_before
          def_cmd.stash_before.remove((idx, sr))
          new_blocks += sr.size()

        cmd.ConvertToNew()

    print(" Total %d blocks are packed as new blocks due to insufficient "
          "cache size." % (new_blocks,))
|
|
|
|
|
2014-08-26 22:10:25 +02:00
|
|
|
  def ComputePatches(self, prefix):
    """Compute patch data for all "diff" transfers and write output blobs.

    Writes new-block data to prefix + ".new.dat" and concatenated patches
    to prefix + ".patch.dat", setting patch_start/patch_len on each
    patched transfer. "diff" transfers are resolved into "move" (if the
    data is unchanged), "imgdiff" or "bsdiff".
    """
    print("Reticulating splines...")
    diff_q = []
    patch_num = 0
    with open(prefix + ".new.dat", "wb") as new_f:
      for xf in self.transfers:
        if xf.style == "zero":
          pass
        elif xf.style == "new":
          for piece in self.tgt.ReadRangeSet(xf.tgt_ranges):
            new_f.write(piece)
        elif xf.style == "diff":
          src = self.src.ReadRangeSet(xf.src_ranges)
          tgt = self.tgt.ReadRangeSet(xf.tgt_ranges)

          # We can't compare src and tgt directly because they may have
          # the same content but be broken up into blocks differently, eg:
          #
          #   ["he", "llo"] vs ["h", "ello"]
          #
          # We want those to compare equal, ideally without having to
          # actually concatenate the strings (these may be tens of
          # megabytes).

          src_sha1 = sha1()
          for p in src:
            src_sha1.update(p)
          tgt_sha1 = sha1()
          tgt_size = 0
          for p in tgt:
            tgt_sha1.update(p)
            tgt_size += len(p)

          if src_sha1.digest() == tgt_sha1.digest():
            # These are identical; we don't need to generate a patch,
            # just issue copy commands on the device.
            xf.style = "move"
          else:
            # For files in zip format (eg, APKs, JARs, etc.) we would
            # like to use imgdiff -z if possible (because it usually
            # produces significantly smaller patches than bsdiff).
            # This is permissible if:
            #
            # - the source and target files are monotonic (ie, the
            #   data is stored with blocks in increasing order), and
            # - we haven't removed any blocks from the source set.
            #
            # If these conditions are satisfied then appending all the
            # blocks in the set together in order will produce a valid
            # zip file (plus possibly extra zeros in the last block),
            # which is what imgdiff needs to operate. (imgdiff is
            # fine with extra zeros at the end of the file.)
            imgdiff = (xf.intact and
                       xf.tgt_name.split(".")[-1].lower()
                       in ("apk", "jar", "zip"))
            xf.style = "imgdiff" if imgdiff else "bsdiff"
            diff_q.append((tgt_size, src, tgt, xf, patch_num))
            patch_num += 1

        else:
          assert False, "unknown style " + xf.style

    if diff_q:
      if self.threads > 1:
        print("Computing patches (using %d threads)..." % (self.threads,))
      else:
        print("Computing patches...")
      # Sort by target size so the largest (slowest) diffs start first.
      diff_q.sort()

      patches = [None] * patch_num

      # TODO: Rewrite with multiprocessing.ThreadPool?
      lock = threading.Lock()
      def diff_worker():
        # Worker loop: pop a job under the lock, diff outside the lock.
        while True:
          with lock:
            if not diff_q:
              return
            tgt_size, src, tgt, xf, patchnum = diff_q.pop()
          patch = compute_patch(src, tgt, imgdiff=(xf.style == "imgdiff"))
          size = len(patch)
          with lock:
            patches[patchnum] = (patch, xf)
            print("%10d %10d (%6.2f%%) %7s %s" % (
                size, tgt_size, size * 100.0 / tgt_size, xf.style,
                xf.tgt_name if xf.tgt_name == xf.src_name else (
                    xf.tgt_name + " (from " + xf.src_name + ")")))

      threads = [threading.Thread(target=diff_worker)
                 for _ in range(self.threads)]
      for th in threads:
        th.start()
      while threads:
        threads.pop().join()
    else:
      patches = []

    # Concatenate all patches, recording each one's offset and length.
    p = 0
    with open(prefix + ".patch.dat", "wb") as patch_f:
      for patch, xf in patches:
        xf.patch_start = p
        xf.patch_len = len(patch)
        patch_f.write(patch)
        p += len(patch)
|
|
|
|
|
|
|
|
def AssertSequenceGood(self):
|
|
|
|
# Simulate the sequences of transfers we will output, and check that:
|
|
|
|
# - we never read a block after writing it, and
|
|
|
|
# - we write every block we care about exactly once.
|
|
|
|
|
|
|
|
# Start with no blocks having been touched yet.
|
|
|
|
touched = RangeSet()
|
|
|
|
|
|
|
|
# Imagine processing the transfers in order.
|
|
|
|
for xf in self.transfers:
|
|
|
|
# Check that the input blocks for this transfer haven't yet been touched.
|
2014-09-08 17:29:55 +02:00
|
|
|
|
|
|
|
x = xf.src_ranges
|
|
|
|
if self.version >= 2:
|
|
|
|
for _, sr in xf.use_stash:
|
|
|
|
x = x.subtract(sr)
|
|
|
|
|
|
|
|
assert not touched.overlaps(x)
|
2014-08-26 22:10:25 +02:00
|
|
|
# Check that the output blocks for this transfer haven't yet been touched.
|
|
|
|
assert not touched.overlaps(xf.tgt_ranges)
|
|
|
|
# Touch all the blocks written by this transfer.
|
|
|
|
touched = touched.union(xf.tgt_ranges)
|
|
|
|
|
|
|
|
# Check that we've written every target block.
|
|
|
|
assert touched == self.tgt.care_map
|
|
|
|
|
2014-09-08 17:29:55 +02:00
|
|
|
def ImproveVertexSequence(self):
  """Re-run topological sort to reduce peak stash usage.

  The digraph is acyclic at this point (any edge that pointed
  backwards in the heuristic order has been reversed).  The existing
  order is valid, but a greedy topological sort that always emits the
  ready vertex with the smallest net stash change can yield an order
  that needs less memory for stashed data.
  """
  print("Improving vertex order...")

  # Work on disposable copies of the edge sets; the sort consumes them.
  for xf in self.transfers:
    xf.incoming = xf.goes_after.copy()
    xf.outgoing = xf.goes_before.copy()

  new_order = []  # transfers in the improved sequence

  # 'ready' holds every vertex with no unprocessed predecessor, keyed
  # by (net stash change, original order) so heappop always yields the
  # cheapest candidate, with the old order as a deterministic tie-break.
  ready = [(u.NetStashChange(), u.order, u)
           for u in self.transfers if not u.incoming]
  heapq.heapify(ready)

  while ready:
    _, _, xf = heapq.heappop(ready)
    new_order.append(xf)
    for succ in xf.outgoing:
      del succ.incoming[xf]
      if not succ.incoming:
        heapq.heappush(ready, (succ.NetStashChange(), succ.order, succ))

  # Anything left unplaced would mean the graph had a cycle.
  assert len(new_order) == len(self.transfers)

  self.transfers = new_order
  for i, xf in enumerate(new_order):
    xf.order = i
|
|
|
|
|
2014-08-26 22:10:25 +02:00
|
|
|
def RemoveBackwardEdges(self):
  """Make the sequence valid (v1) by dropping violated source blocks.

  For every dependency edge that points backwards in the chosen order,
  trim the offending blocks out of the reader's source ranges so the
  transfer no longer depends on data that was already overwritten.
  Transfers that lose all their source data degrade to "new".
  """
  print("Removing backward edges...")
  in_order = 0
  out_of_order = 0
  lost_source = 0

  for xf in self.transfers:
    lost = 0
    size = xf.src_ranges.size()
    for u in xf.goes_before:
      # xf is supposed to run before u.
      if xf.order < u.order:
        # It already does; nothing to fix.
        in_order += 1
      else:
        # It runs after u instead, so u's writes would clobber part of
        # xf's source.  Drop those blocks from xf's source so xf can
        # legally run after u.
        out_of_order += 1
        assert xf.src_ranges.overlaps(u.tgt_ranges)
        xf.src_ranges = xf.src_ranges.subtract(u.tgt_ranges)
        xf.intact = False

    if xf.style == "diff" and not xf.src_ranges:
      # No source data remains to diff against; ship full data instead.
      xf.style = "new"

    lost = size - xf.src_ranges.size()
    lost_source += lost

  print(("  %d/%d dependencies (%.2f%%) were violated; "
         "%d source blocks removed.") %
        (out_of_order, in_order + out_of_order,
         (out_of_order * 100.0 / (in_order + out_of_order))
         if (in_order + out_of_order) else 0.0,
         lost_source))
|
|
|
|
|
2014-09-08 17:29:55 +02:00
|
|
|
def ReverseBackwardEdges(self):
  """Make the sequence valid (v2+) by stashing violated blocks.

  For every dependency edge that points backwards in the chosen order,
  have the writer stash the blocks the reader still needs, let the
  reader consume them from the stash, and flip the edge so that the
  graph becomes acyclic.
  """
  print("Reversing backward edges...")
  in_order = 0
  out_of_order = 0
  stashes = 0        # id assigned to the next stash
  stash_size = 0     # total number of blocks stashed

  for xf in self.transfers:
    for u in xf.goes_before.copy():
      # xf is supposed to run before u.
      if xf.order < u.order:
        # It already does; nothing to fix.
        in_order += 1
      else:
        # It runs after u instead.  Make u stash the blocks it writes
        # that xf wants to read, let xf read them from the stash, and
        # then require u to run before xf.
        out_of_order += 1

        overlap = xf.src_ranges.intersect(u.tgt_ranges)
        assert overlap

        u.stash_before.append((stashes, overlap))
        xf.use_stash.append((stashes, overlap))
        stashes += 1
        stash_size += overlap.size()

        # Flip the edge: xf now depends on u.
        del xf.goes_before[u]
        del u.goes_after[xf]
        xf.goes_after[u] = None  # value doesn't matter
        u.goes_before[xf] = None

  print(("  %d/%d dependencies (%.2f%%) were violated; "
         "%d source blocks stashed.") %
        (out_of_order, in_order + out_of_order,
         (out_of_order * 100.0 / (in_order + out_of_order))
         if (in_order + out_of_order) else 0.0,
         stash_size))
|
|
|
|
|
2014-08-26 22:10:25 +02:00
|
|
|
def FindVertexSequence(self):
  """Order the transfers to minimize the weight of violated edges.

  Implements the heuristic from "A Fast & Effective Heuristic for the
  Feedback Arc Set Problem" by P. Eades, X. Lin, and W.F. Smyth:
  imagine laying the digraph's vertices on a horizontal line and try
  to minimize the total weight of edges that end up pointing left
  (those must be broken to turn the digraph into a DAG).  Here an
  edge's weight is the number of source blocks we lose if that edge
  is removed, so we minimize total weight rather than edge count.
  """
  print("Finding vertex sequence...")

  # Disposable copies of the edge sets; the algorithm eats them.
  for xf in self.transfers:
    xf.incoming = xf.goes_after.copy()
    xf.outgoing = xf.goes_before.copy()

  # An OrderedDict (used as an ordered set) keeps the output
  # repeatable; a plain set would depend on object hash values.
  remaining = OrderedDict()
  for xf in self.transfers:
    remaining[xf] = None

  head = deque()  # left side of the sequence, grown left to right
  tail = deque()  # right side of the sequence, grown right to left

  while remaining:

    # Peel off all current sinks; they belong at the end.
    while True:
      sinks = [u for u in remaining if not u.outgoing]
      if not sinks:
        break
      for u in sinks:
        tail.appendleft(u)
        del remaining[u]
        for pred in u.incoming:
          del pred.outgoing[u]

    # Peel off all current sources; they belong at the beginning.
    while True:
      sources = [u for u in remaining if not u.incoming]
      if not sources:
        break
      for u in sources:
        head.append(u)
        del remaining[u]
        for succ in u.outgoing:
          del succ.incoming[u]

    if not remaining:
      break

    # Neither a source nor a sink is left.  Pick the vertex that
    # maximizes the net weight saved by pretending it is a source
    # rather than a sink, and place it next.
    best_delta = None
    best = None
    for u in remaining:
      delta = sum(u.outgoing.values()) - sum(u.incoming.values())
      if best is None or delta > best_delta:
        best_delta = delta
        best = u

    u = best
    head.append(u)
    del remaining[u]
    for succ in u.outgoing:
      del succ.incoming[u]
    for pred in u.incoming:
      del pred.outgoing[u]

  # Record the chosen sequence in each transfer's 'order' field, and
  # rearrange self.transfers to match.
  reordered = []
  for x in itertools.chain(head, tail):
    x.order = len(reordered)
    reordered.append(x)
    del x.incoming
    del x.outgoing

  self.transfers = reordered
|
|
|
|
|
|
|
|
def GenerateDigraph(self):
  """Build dependency edges between all pairs of transfers.

  If transfer A writes blocks that transfer B reads, then B must run
  before A; the edge weight is the number of source blocks B would
  lose if the edge had to be broken.
  """
  print("Generating digraph...")
  for writer in self.transfers:
    for reader in self.transfers:
      if writer is reader:
        continue

      # Blocks that 'writer' overwrites but 'reader' still wants to read.
      conflict = writer.tgt_ranges.intersect(reader.src_ranges)
      if conflict:
        if reader.src_name == "__ZERO":
          # Removing source blocks from the __ZERO domain costs
          # (nearly) nothing, so weight that edge as free.
          weight = 0
        else:
          weight = conflict.size()
        reader.goes_before[writer] = weight
        writer.goes_after[reader] = weight
|
|
|
|
|
|
|
|
def FindTransfers(self):
  """Parse the file_map to generate all the transfers."""

  def AddTransfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id,
                  split=False):
    """Create one Transfer(), splitting oversized diffs if requested.

    BBOTA v3 stashes source blocks to support resuming, but as files
    grow and the cache partition shrinks, a single file's source
    blocks can be too large to stash.  When 'split' is set, a diff
    whose source or target exceeds MAX_BLOCKS_PER_DIFF_TRANSFER is
    chopped into multiple Transfer()s of at most that many blocks.

    The cost of splitting is that the pieces can only use bsdiff,
    not imgdiff.
    """
    MAX_BLOCKS_PER_DIFF_TRANSFER = 1024

    # Splitting only applies to diff transfers.
    if style != "diff" or not split:
      Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
      return

    # Small files pass through unchanged.
    if (tgt_ranges.size() <= MAX_BLOCKS_PER_DIFF_TRANSFER and
        src_ranges.size() <= MAX_BLOCKS_PER_DIFF_TRANSFER):
      Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
      return

    # Peel off max-sized pieces while both sides are oversized.
    pieces = 0
    while (tgt_ranges.size() > MAX_BLOCKS_PER_DIFF_TRANSFER and
           src_ranges.size() > MAX_BLOCKS_PER_DIFF_TRANSFER):
      tgt_first = tgt_ranges.first(MAX_BLOCKS_PER_DIFF_TRANSFER)
      src_first = src_ranges.first(MAX_BLOCKS_PER_DIFF_TRANSFER)
      Transfer("%s-%d" % (tgt_name, pieces), "%s-%d" % (src_name, pieces),
               tgt_first, src_first, style, by_id)
      tgt_ranges = tgt_ranges.subtract(tgt_first)
      src_ranges = src_ranges.subtract(src_first)
      pieces += 1

    # Emit whatever is left over.
    if tgt_ranges.size() or src_ranges.size():
      # Leftovers must exist on both sides.
      assert tgt_ranges.size() and src_ranges.size()
      Transfer("%s-%d" % (tgt_name, pieces), "%s-%d" % (src_name, pieces),
               tgt_ranges, src_ranges, style, by_id)

  empty = RangeSet()
  for tgt_fn, tgt_ranges in self.tgt.file_map.items():
    if tgt_fn == "__ZERO":
      # "__ZERO" is the special domain of all blocks not contained in
      # any file that are filled with zeros; they get a dedicated
      # transfer style.
      src_ranges = self.src.file_map.get("__ZERO", empty)
      AddTransfer(tgt_fn, "__ZERO", tgt_ranges, src_ranges,
                  "zero", self.transfers)
      continue

    elif tgt_fn == "__COPY":
      # "__COPY" holds the blocks not contained in any file that must
      # be copied to the target unconditionally.
      AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
      continue

    elif tgt_fn in self.src.file_map:
      # Exact pathname match in the source.
      AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
                  "diff", self.transfers, self.version >= 3)
      continue

    b = os.path.basename(tgt_fn)
    if b in self.src_basenames:
      # Exact basename match in the source.
      src_fn = self.src_basenames[b]
      AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
                  "diff", self.transfers, self.version >= 3)
      continue

    b = re.sub("[0-9]+", "#", b)
    if b in self.src_numpatterns:
      # "Number pattern" match: the basenames agree once every run of
      # digits is replaced by "#".  (Useful for .so files whose names
      # carry version numbers that get bumped.)
      src_fn = self.src_numpatterns[b]
      AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
                  "diff", self.transfers, self.version >= 3)
      continue

    # No source match at all; ship the full data.
    AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
|
2014-08-26 22:10:25 +02:00
|
|
|
|
|
|
|
def AbbreviateSourceNames(self):
  """Index source files by basename and by digit-collapsed basename.

  Populates self.src_basenames (basename -> full source path) and
  self.src_numpatterns (basename with every run of digits replaced by
  "#" -> full source path) for the fuzzy matching in FindTransfers.
  On duplicate keys, the last path seen wins.
  """
  for src_fn in self.src.file_map.keys():
    base = os.path.basename(src_fn)
    self.src_basenames[base] = src_fn
    self.src_numpatterns[re.sub("[0-9]+", "#", base)] = src_fn
|
|
|
|
|
|
|
|
@staticmethod
def AssertPartition(total, seq):
  """Check that the RangeSets in 'seq' form a partition of 'total'.

  Asserts that the members of 'seq' are pairwise disjoint and that
  their union is exactly the 'total' RangeSet.
  """
  covered = RangeSet()
  for piece in seq:
    assert not covered.overlaps(piece)
    covered = covered.union(piece)
  assert covered == total
|