Merge "[incfs] Stream the hash tree for incremental installation" into rvc-dev-plus-aosp
commit 31484c4d88

4 changed files with 326 additions and 161 deletions
@@ -16,13 +16,13 @@
#include "incremental.h"

#include <android-base/endian.h>
#include "incremental_utils.h"

#include <android-base/file.h>
#include <android-base/stringprintf.h>
#include <openssl/base64.h>

#include "adb_client.h"
#include "adb_io.h"
#include "adb_utils.h"
#include "commandline.h"
#include "sysdeps.h"

@@ -31,65 +31,8 @@ using namespace std::literals;

namespace incremental {

namespace {

static constexpr auto IDSIG = ".idsig"sv;

using android::base::StringPrintf;

using Size = int64_t;

static inline int32_t read_int32(borrowed_fd fd) {
    int32_t result;
    return ReadFdExactly(fd, &result, sizeof(result)) ? result : -1;
}

static inline void append_int(borrowed_fd fd, std::vector<char>* bytes) {
    int32_t le_val = read_int32(fd);
    auto old_size = bytes->size();
    bytes->resize(old_size + sizeof(le_val));
    memcpy(bytes->data() + old_size, &le_val, sizeof(le_val));
}

static inline void append_bytes_with_size(borrowed_fd fd, std::vector<char>* bytes) {
    int32_t le_size = read_int32(fd);
    if (le_size < 0) {
        return;
    }
    int32_t size = int32_t(le32toh(le_size));
    auto old_size = bytes->size();
    bytes->resize(old_size + sizeof(le_size) + size);
    memcpy(bytes->data() + old_size, &le_size, sizeof(le_size));
    ReadFdExactly(fd, bytes->data() + old_size + sizeof(le_size), size);
}

static inline std::pair<std::vector<char>, int32_t> read_id_sig_headers(borrowed_fd fd) {
    std::vector<char> result;
    append_int(fd, &result);              // version
    append_bytes_with_size(fd, &result);  // hashingInfo
    append_bytes_with_size(fd, &result);  // signingInfo
    auto le_tree_size = read_int32(fd);
    auto tree_size = int32_t(le32toh(le_tree_size));  // size of the verity tree
    return {std::move(result), tree_size};
}

static inline Size verity_tree_size_for_file(Size fileSize) {
    constexpr int INCFS_DATA_FILE_BLOCK_SIZE = 4096;
    constexpr int SHA256_DIGEST_SIZE = 32;
    constexpr int digest_size = SHA256_DIGEST_SIZE;
    constexpr int hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;

    Size total_tree_block_count = 0;

    auto block_count = 1 + (fileSize - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
    auto hash_block_count = block_count;
    for (auto i = 0; hash_block_count > 1; i++) {
        hash_block_count = (hash_block_count + hash_per_block - 1) / hash_per_block;
        total_tree_block_count += hash_block_count;
    }
    return total_tree_block_count * INCFS_DATA_FILE_BLOCK_SIZE;
}

// Read, verify and return the signature bytes. Keeping fd at the position of start of verity tree.
static std::pair<unique_fd, std::vector<char>> read_signature(Size file_size,
                                                              std::string signature_file,
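
The helpers above walk the .idsig container field by field: a version, two length-prefixed blobs, the verity tree size, and then the raw tree. A minimal standalone sketch of that same layout, using plain iostreams instead of adb's borrowed_fd/ReadFdExactly (field names follow the comments in the diff; the raw little-endian read is an assumption matching the le32toh usage, so it only holds on a little-endian host). This is an illustration, not part of the change:

#include <cstdint>
#include <cstring>
#include <istream>
#include <vector>

// Illustrative layout of the .idsig stream, as walked by read_id_sig_headers():
//   int32 LE version
//   int32 LE hashingInfo size, followed by that many bytes
//   int32 LE signingInfo size, followed by that many bytes
//   int32 LE verity tree size, followed by the raw tree blocks
struct IdsigHeaders {
    int32_t version = 0;
    std::vector<char> hashing_info;
    std::vector<char> signing_info;
    int32_t tree_size = 0;  // the stream is now positioned at the first tree byte
};

static bool read_le32(std::istream& in, int32_t* out) {
    char buf[4];
    if (!in.read(buf, sizeof(buf))) return false;
    std::memcpy(out, buf, sizeof(buf));  // assumes a little-endian host, like the adb client code
    return true;
}

static bool read_sized(std::istream& in, std::vector<char>* out) {
    int32_t size = 0;
    if (!read_le32(in, &size) || size < 0) return false;
    out->resize(size);
    return size == 0 || static_cast<bool>(in.read(out->data(), size));
}

// Returns the parsed headers; the caller can then read tree_size bytes of tree data.
static bool parse_idsig_headers(std::istream& in, IdsigHeaders* out) {
    return read_le32(in, &out->version) && read_sized(in, &out->hashing_info) &&
           read_sized(in, &out->signing_info) && read_le32(in, &out->tree_size);
}
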
@@ -104,7 +47,7 @@ static std::pair<unique_fd, std::vector<char>> read_signature(Size file_size,
        return {};
    }

    unique_fd fd(adb_open(signature_file.c_str(), O_RDONLY | O_CLOEXEC));
    unique_fd fd(adb_open(signature_file.c_str(), O_RDONLY));
    if (fd < 0) {
        if (!silent) {
            fprintf(stderr, "Failed to open signature file: %s. Abort.\n", signature_file.c_str());

@@ -172,9 +115,8 @@ static unique_fd start_install(const Files& files, bool silent) {
            return {};
        }

        auto file_desc =
                StringPrintf("%s:%lld:%s:%s", android::base::Basename(file).c_str(),
                             (long long)st.st_size, std::to_string(i).c_str(), signature.c_str());
        auto file_desc = StringPrintf("%s:%lld:%d:%s:1", android::base::Basename(file).c_str(),
                                      (long long)st.st_size, i, signature.c_str());
        command_args.push_back(std::move(file_desc));

        signature_fds.push_back(std::move(signature_fd));
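
The per-file argument that start_install passes to the install session changes shape here: the file index is now formatted directly as an integer and a new trailing field is appended. A small sketch of building the new descriptor with the format string shown above (make_file_desc and the sample values are hypothetical, and the meaning of the trailing "1" is not shown in this hunk):

#include <cstdio>
#include <string>

// Builds "<name>:<size>:<index>:<base64 signature>:1" exactly as the new
// StringPrintf call does; helper name and sample values are illustrative only.
static std::string make_file_desc(const std::string& name, long long size, int index,
                                  const std::string& signature) {
    char buf[1024];
    std::snprintf(buf, sizeof(buf), "%s:%lld:%d:%s:1", name.c_str(), size, index,
                  signature.c_str());
    return buf;
}

// e.g. make_file_desc("base.apk", 12345678, 0, "MIIB...") -> "base.apk:12345678:0:MIIB...:1"
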
@@ -190,21 +132,9 @@ static unique_fd start_install(const Files& files, bool silent) {
        return {};
    }

    // Pushing verity trees for all installation files.
    for (auto&& local_fd : signature_fds) {
        if (!copy_to_file(local_fd.get(), connection_fd.get())) {
            if (!silent) {
                fprintf(stderr, "Failed to stream tree bytes: %s. Abort.\n", strerror(errno));
            }
            return {};
        }
    }

    return connection_fd;
}

} // namespace

bool can_install(const Files& files) {
    for (const auto& file : files) {
        struct stat st;

@@ -1,4 +1,4 @@
/*
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");

@@ -44,9 +44,10 @@
namespace incremental {

static constexpr int kBlockSize = 4096;
static constexpr int kHashesPerBlock = kBlockSize / kDigestSize;
static constexpr int kCompressedSizeMax = kBlockSize * 0.95;
static constexpr int8_t kTypeData = 0;
static constexpr int8_t kTypeHash = 1;
static constexpr int8_t kCompressionNone = 0;
static constexpr int8_t kCompressionLZ4 = 1;
static constexpr int kCompressBound = std::max(kBlockSize, LZ4_COMPRESSBOUND(kBlockSize));
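
For reference, the values these constants work out to with a 4 KiB block and SHA-256 digests, checked in isolation (lz4_compress_bound below reproduces lz4.h's documented bound formula instead of including the header; this is a standalone check, not part of the change):

#include <algorithm>

// Stand-ins for the constants above so the arithmetic can be verified.
constexpr int kBlockSize = 4096;
constexpr int kDigestSize = 32;                                         // SHA-256
constexpr int kHashesPerBlock = kBlockSize / kDigestSize;               // 128 hashes per block
constexpr int kCompressedSizeMax = kBlockSize * 0.95;                   // 3891: send raw if LZ4 saves < ~5%
constexpr int lz4_compress_bound = kBlockSize + kBlockSize / 255 + 16;  // LZ4_COMPRESSBOUND(4096) = 4128
constexpr int kCompressBound = std::max(kBlockSize, lz4_compress_bound);

static_assert(kHashesPerBlock == 128);
static_assert(kCompressedSizeMax == 3891);
static_assert(kCompressBound == 4128);
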
@@ -132,41 +133,64 @@ struct ResponseHeader {
    CompressionType compression_type;  // 1 byte
    BlockIdx block_idx;                // 4 bytes
    BlockSize block_size;              // 2 bytes

    static constexpr size_t responseSizeFor(size_t dataSize) {
        return dataSize + sizeof(ResponseHeader);
    }
} __attribute__((packed));

template <size_t Size = kBlockSize>
struct BlockBuffer {
    ResponseHeader header;
    char data[Size];
} __attribute__((packed));

// Holds streaming state for a file
class File {
  public:
    // Plain file
    File(const char* filepath, FileId id, int64_t size, unique_fd fd) : File(filepath, id, size) {
    File(const char* filepath, FileId id, int64_t size, unique_fd fd, int64_t tree_offset,
         unique_fd tree_fd)
        : File(filepath, id, size, tree_offset) {
        this->fd_ = std::move(fd);
        this->tree_fd_ = std::move(tree_fd);
        priority_blocks_ = PriorityBlocksForFile(filepath, fd_.get(), size);
    }
    int64_t ReadBlock(BlockIdx block_idx, void* buf, bool* is_zip_compressed,
                      std::string* error) const {
        char* buf_ptr = static_cast<char*>(buf);
    int64_t ReadDataBlock(BlockIdx block_idx, void* buf, bool* is_zip_compressed) const {
        int64_t bytes_read = -1;
        const off64_t offsetStart = blockIndexToOffset(block_idx);
        bytes_read = adb_pread(fd_, &buf_ptr[sizeof(ResponseHeader)], kBlockSize, offsetStart);
        bytes_read = adb_pread(fd_, buf, kBlockSize, offsetStart);
        return bytes_read;
    }
    int64_t ReadTreeBlock(BlockIdx block_idx, void* buf) const {
        int64_t bytes_read = -1;
        const off64_t offsetStart = tree_offset_ + blockIndexToOffset(block_idx);
        bytes_read = adb_pread(tree_fd_, buf, kBlockSize, offsetStart);
        return bytes_read;
    }

    const unique_fd& RawFd() const { return fd_; }
    const std::vector<BlockIdx>& PriorityBlocks() const { return priority_blocks_; }

    std::vector<bool> sentBlocks;
    NumBlocks sentBlocksCount = 0;

    std::vector<bool> sentTreeBlocks;

    const char* const filepath;
    const FileId id;
    const int64_t size;

  private:
    File(const char* filepath, FileId id, int64_t size) : filepath(filepath), id(id), size(size) {
    File(const char* filepath, FileId id, int64_t size, int64_t tree_offset)
        : filepath(filepath), id(id), size(size), tree_offset_(tree_offset) {
        sentBlocks.resize(numBytesToNumBlocks(size));
        sentTreeBlocks.resize(verity_tree_blocks_for_file(size));
    }
    unique_fd fd_;
    std::vector<BlockIdx> priority_blocks_;

    unique_fd tree_fd_;
    const int64_t tree_offset_;
};

class IncrementalServer {
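
BlockBuffer pairs the packed wire header with an inline payload, so a block can be read straight into the bytes that follow the header and the whole thing sent as one contiguous buffer; responseSizeFor() is then just header size plus payload size. A stripped-down sketch of the same pattern (the demo header keeps only the fields visible in this hunk, so its size is not the real ResponseHeader size; not part of the change):

#include <cstddef>
#include <cstdint>

// Only the fields shown in the hunk above; the real ResponseHeader has more.
struct DemoHeader {
    int8_t compression_type;  // 1 byte
    int32_t block_idx;        // 4 bytes
    int16_t block_size;       // 2 bytes

    static constexpr size_t responseSizeFor(size_t dataSize) { return dataSize + sizeof(DemoHeader); }
} __attribute__((packed));

// Header and payload live in one packed allocation, so &buffer can be handed to a
// single Send() after filling buffer.header and buffer.data.
template <size_t Size = 4096>
struct DemoBlockBuffer {
    DemoHeader header;
    char data[Size];
} __attribute__((packed));

static_assert(sizeof(DemoHeader) == 7);  // packed: 1 + 4 + 2
static_assert(sizeof(DemoBlockBuffer<4096>) == DemoHeader::responseSizeFor(4096));
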
@@ -174,6 +198,8 @@ class IncrementalServer {
    IncrementalServer(unique_fd adb_fd, unique_fd output_fd, std::vector<File> files)
        : adb_fd_(std::move(adb_fd)), output_fd_(std::move(output_fd)), files_(std::move(files)) {
        buffer_.reserve(kReadBufferSize);
        pendingBlocksBuffer_.resize(kChunkFlushSize + 2 * kBlockSize);
        pendingBlocks_ = pendingBlocksBuffer_.data() + sizeof(ChunkHeader);
    }

    bool Serve();

@@ -208,7 +234,11 @@ class IncrementalServer {
    void erase_buffer_head(int count) { buffer_.erase(buffer_.begin(), buffer_.begin() + count); }

    enum class SendResult { Sent, Skipped, Error };
    SendResult SendBlock(FileId fileId, BlockIdx blockIdx, bool flush = false);
    SendResult SendDataBlock(FileId fileId, BlockIdx blockIdx, bool flush = false);

    bool SendTreeBlock(FileId fileId, int32_t fileBlockIdx, BlockIdx blockIdx);
    bool SendTreeBlocksForDataBlock(FileId fileId, BlockIdx blockIdx);

    bool SendDone();
    void RunPrefetching();

@@ -228,7 +258,10 @@ class IncrementalServer {
    int compressed_ = 0, uncompressed_ = 0;
    long long sentSize_ = 0;

    std::vector<char> pendingBlocks_;
    static constexpr auto kChunkFlushSize = 31 * kBlockSize;

    std::vector<char> pendingBlocksBuffer_;
    char* pendingBlocks_ = nullptr;

    // True when client notifies that all the data has been received
    bool servingComplete_ = false;

@@ -250,7 +283,7 @@ bool IncrementalServer::SkipToRequest(void* buffer, size_t* size, bool blocking)
    if (bcur > 0) {
        // output the rest.
        WriteFdExactly(output_fd_, buffer_.data(), bcur);
        (void)WriteFdExactly(output_fd_, buffer_.data(), bcur);
        erase_buffer_head(bcur);
    }
@@ -265,9 +298,10 @@ bool IncrementalServer::SkipToRequest(void* buffer, size_t* size, bool blocking)
        auto res = adb_poll(&pfd, 1, blocking ? kPollTimeoutMillis : 0);

        if (res != 1) {
            WriteFdExactly(output_fd_, buffer_.data(), buffer_.size());
            auto err = errno;
            (void)WriteFdExactly(output_fd_, buffer_.data(), buffer_.size());
            if (res < 0) {
                D("Failed to poll: %s\n", strerror(errno));
                D("Failed to poll: %s", strerror(err));
                return false;
            }
            if (blocking) {

@@ -289,7 +323,7 @@ bool IncrementalServer::SkipToRequest(void* buffer, size_t* size, bool blocking)
            continue;
        }

        D("Failed to read from fd %d: %d. Exit\n", adb_fd_.get(), errno);
        D("Failed to read from fd %d: %d. Exit", adb_fd_.get(), errno);
        break;
    }
    // socket is closed. print remaining messages
|
|||
return request;
|
||||
}
|
||||
|
||||
auto IncrementalServer::SendBlock(FileId fileId, BlockIdx blockIdx, bool flush) -> SendResult {
|
||||
bool IncrementalServer::SendTreeBlocksForDataBlock(const FileId fileId, const BlockIdx blockIdx) {
|
||||
auto& file = files_[fileId];
|
||||
const int32_t data_block_count = numBytesToNumBlocks(file.size);
|
||||
|
||||
const int32_t total_nodes_count(file.sentTreeBlocks.size());
|
||||
const int32_t leaf_nodes_count = (data_block_count + kHashesPerBlock - 1) / kHashesPerBlock;
|
||||
|
||||
const int32_t leaf_nodes_offset = total_nodes_count - leaf_nodes_count;
|
||||
|
||||
// Leaf level, sending only 1 block.
|
||||
const int32_t leaf_idx = leaf_nodes_offset + blockIdx / kHashesPerBlock;
|
||||
if (file.sentTreeBlocks[leaf_idx]) {
|
||||
return true;
|
||||
}
|
||||
if (!SendTreeBlock(fileId, blockIdx, leaf_idx)) {
|
||||
return false;
|
||||
}
|
||||
file.sentTreeBlocks[leaf_idx] = true;
|
||||
|
||||
// Non-leaf, sending EVERYTHING. This should be done only once.
|
||||
if (leaf_nodes_offset == 0 || file.sentTreeBlocks[0]) {
|
||||
return true;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < leaf_nodes_offset; ++i) {
|
||||
if (!SendTreeBlock(fileId, blockIdx, i)) {
|
||||
return false;
|
||||
}
|
||||
file.sentTreeBlocks[i] = true;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
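
The indexing above relies on the verity tree layout implied by the code: upper levels come first and the leaf level is stored last, so leaf_nodes_offset marks where the leaf level starts, and each leaf block covers kHashesPerBlock (128) data blocks. A worked example with assumed sizes (not part of the change):

#include <cassert>
#include <cstdint>

// Same leaf-index arithmetic as SendTreeBlocksForDataBlock, with example numbers:
// a file of 25,600 data blocks (100 MiB) has a 203-block verity tree
// (200 leaf blocks plus 2 + 1 upper-level blocks), so the leaf level starts at node 3.
int main() {
    constexpr int32_t kHashesPerBlock = 4096 / 32;  // 128
    constexpr int32_t data_block_count = 25600;
    constexpr int32_t total_nodes_count = 203;
    constexpr int32_t leaf_nodes_count =
            (data_block_count + kHashesPerBlock - 1) / kHashesPerBlock;          // 200
    constexpr int32_t leaf_nodes_offset = total_nodes_count - leaf_nodes_count;  // 3

    // A page miss on data block 1000 needs leaf node 3 + 1000 / 128 = 10.
    constexpr int32_t leaf_idx = leaf_nodes_offset + 1000 / kHashesPerBlock;
    static_assert(leaf_idx == 10);
    assert(leaf_nodes_offset == 3);
    return 0;
}
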
bool IncrementalServer::SendTreeBlock(FileId fileId, int32_t fileBlockIdx, BlockIdx blockIdx) {
    const auto& file = files_[fileId];

    BlockBuffer buffer;
    const int64_t bytesRead = file.ReadTreeBlock(blockIdx, buffer.data);
    if (bytesRead <= 0) {
        fprintf(stderr, "Failed to get data for %s.idsig at blockIdx=%d.\n", file.filepath,
                blockIdx);
        return false;
    }

    buffer.header.compression_type = kCompressionNone;
    buffer.header.block_type = kTypeHash;
    buffer.header.file_id = toBigEndian(fileId);
    buffer.header.block_size = toBigEndian(int16_t(bytesRead));
    buffer.header.block_idx = toBigEndian(blockIdx);

    Send(&buffer, ResponseHeader::responseSizeFor(bytesRead), /*flush=*/false);

    return true;
}

auto IncrementalServer::SendDataBlock(FileId fileId, BlockIdx blockIdx, bool flush) -> SendResult {
    auto& file = files_[fileId];
    if (blockIdx >= static_cast<long>(file.sentBlocks.size())) {
        fprintf(stderr, "Failed to read file %s at block %" PRId32 " (past end).\n", file.filepath,
                blockIdx);
        return SendResult::Error;
        // may happen as we schedule some extra blocks for reported page misses
        D("Skipped reading file %s at block %" PRId32 " (past end).", file.filepath, blockIdx);
        return SendResult::Skipped;
    }
    if (file.sentBlocks[blockIdx]) {
        return SendResult::Skipped;
    }
    std::string error;
    char raw[sizeof(ResponseHeader) + kBlockSize];
    bool isZipCompressed = false;
    const int64_t bytesRead = file.ReadBlock(blockIdx, &raw, &isZipCompressed, &error);
    if (bytesRead < 0) {
        fprintf(stderr, "Failed to get data for %s at blockIdx=%d (%s).\n", file.filepath, blockIdx,
                error.c_str());

    if (!SendTreeBlocksForDataBlock(fileId, blockIdx)) {
        return SendResult::Error;
    }

    ResponseHeader* header = nullptr;
    char data[sizeof(ResponseHeader) + kCompressBound];
    char* compressed = data + sizeof(*header);
    BlockBuffer raw;
    bool isZipCompressed = false;
    const int64_t bytesRead = file.ReadDataBlock(blockIdx, raw.data, &isZipCompressed);
    if (bytesRead < 0) {
        fprintf(stderr, "Failed to get data for %s at blockIdx=%d (%d).\n", file.filepath, blockIdx,
                errno);
        return SendResult::Error;
    }

    BlockBuffer<kCompressBound> compressed;
    int16_t compressedSize = 0;
    if (!isZipCompressed) {
        compressedSize =
                LZ4_compress_default(raw + sizeof(*header), compressed, bytesRead, kCompressBound);
        compressedSize = LZ4_compress_default(raw.data, compressed.data, bytesRead, kCompressBound);
    }
    int16_t blockSize;
    ResponseHeader* header;
    if (compressedSize > 0 && compressedSize < kCompressedSizeMax) {
        ++compressed_;
        blockSize = compressedSize;
        header = reinterpret_cast<ResponseHeader*>(data);
        header = &compressed.header;
        header->compression_type = kCompressionLZ4;
    } else {
        ++uncompressed_;
        blockSize = bytesRead;
        header = reinterpret_cast<ResponseHeader*>(raw);
        header = &raw.header;
        header->compression_type = kCompressionNone;
    }

    header->block_type = kTypeData;

    header->file_id = toBigEndian(fileId);
    header->block_size = toBigEndian(blockSize);
    header->block_idx = toBigEndian(blockIdx);

    file.sentBlocks[blockIdx] = true;
    file.sentBlocksCount += 1;
    Send(header, sizeof(*header) + blockSize, flush);
    Send(header, ResponseHeader::responseSizeFor(blockSize), flush);

    return SendResult::Sent;
}
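
Each data block is LZ4-compressed opportunistically: the compressed copy is sent only when LZ4 succeeds and the result stays under kCompressedSizeMax (95% of a block); otherwise the raw bytes go out, and blocks that are already zip-compressed inside the APK are never recompressed. A standalone sketch of that decision, using the real LZ4_compress_default() from liblz4 (buffer handling and names are illustrative, not the server's code):

#include <lz4.h>

#include <vector>

// Returns the bytes to send for one 4 KiB block: the LZ4 output if it is
// meaningfully smaller, otherwise the untouched input. already_compressed
// mirrors the is_zip_compressed flag: do not recompress zip-compressed data.
static std::vector<char> pick_payload(const char* block, int block_size, bool already_compressed) {
    constexpr int kCompressedSizeMax = 4096 * 0.95;  // 3891
    std::vector<char> compressed(LZ4_COMPRESSBOUND(4096));
    int compressed_size = 0;
    if (!already_compressed) {
        compressed_size = LZ4_compress_default(block, compressed.data(), block_size,
                                               static_cast<int>(compressed.size()));
    }
    if (compressed_size > 0 && compressed_size < kCompressedSizeMax) {
        compressed.resize(compressed_size);
        return compressed;
    }
    return std::vector<char>(block, block + block_size);
}
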
@@ -388,7 +479,8 @@ void IncrementalServer::RunPrefetching() {
        if (!priority_blocks.empty()) {
            for (auto& i = prefetch.priorityIndex;
                 blocksToSend > 0 && i < (BlockIdx)priority_blocks.size(); ++i) {
                if (auto res = SendBlock(file.id, priority_blocks[i]); res == SendResult::Sent) {
                if (auto res = SendDataBlock(file.id, priority_blocks[i]);
                    res == SendResult::Sent) {
                    --blocksToSend;
                } else if (res == SendResult::Error) {
                    fprintf(stderr, "Failed to send priority block %" PRId32 "\n", i);

@@ -396,7 +488,7 @@ void IncrementalServer::RunPrefetching() {
            }
        }
        for (auto& i = prefetch.overallIndex; blocksToSend > 0 && i < prefetch.overallEnd; ++i) {
            if (auto res = SendBlock(file.id, i); res == SendResult::Sent) {
            if (auto res = SendDataBlock(file.id, i); res == SendResult::Sent) {
                --blocksToSend;
            } else if (res == SendResult::Error) {
                fprintf(stderr, "Failed to send block %" PRId32 "\n", i);
@@ -409,30 +501,25 @@ void IncrementalServer::RunPrefetching() {
}

void IncrementalServer::Send(const void* data, size_t size, bool flush) {
    constexpr auto kChunkFlushSize = 31 * kBlockSize;

    if (pendingBlocks_.empty()) {
        pendingBlocks_.resize(sizeof(ChunkHeader));
    }
    pendingBlocks_.insert(pendingBlocks_.end(), static_cast<const char*>(data),
                          static_cast<const char*>(data) + size);
    if (flush || pendingBlocks_.size() > kChunkFlushSize) {
    pendingBlocks_ = std::copy_n(static_cast<const char*>(data), size, pendingBlocks_);
    if (flush || pendingBlocks_ - pendingBlocksBuffer_.data() > kChunkFlushSize) {
        Flush();
    }
}

void IncrementalServer::Flush() {
    if (pendingBlocks_.empty()) {
    auto dataBytes = pendingBlocks_ - (pendingBlocksBuffer_.data() + sizeof(ChunkHeader));
    if (dataBytes == 0) {
        return;
    }

    *(ChunkHeader*)pendingBlocks_.data() =
            toBigEndian<int32_t>(pendingBlocks_.size() - sizeof(ChunkHeader));
    if (!WriteFdExactly(adb_fd_, pendingBlocks_.data(), pendingBlocks_.size())) {
        fprintf(stderr, "Failed to write %d bytes\n", int(pendingBlocks_.size()));
    *(ChunkHeader*)pendingBlocksBuffer_.data() = toBigEndian<int32_t>(dataBytes);
    auto totalBytes = sizeof(ChunkHeader) + dataBytes;
    if (!WriteFdExactly(adb_fd_, pendingBlocksBuffer_.data(), totalBytes)) {
        fprintf(stderr, "Failed to write %d bytes\n", int(totalBytes));
    }
    sentSize_ += pendingBlocks_.size();
    pendingBlocks_.clear();
    sentSize_ += totalBytes;
    pendingBlocks_ = pendingBlocksBuffer_.data() + sizeof(ChunkHeader);
}

bool IncrementalServer::ServingComplete(std::optional<TimePoint> startTime, int missesCount,
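
Responses are batched into chunks: the fixed buffer reserves room for a ChunkHeader at the front (written as a big-endian int32 payload length), blocks are appended behind it, and a flush writes the prefix plus the accumulated payload in one call once roughly 31 blocks have piled up or a flush is forced. A minimal sketch of the framing, assuming a 4-byte big-endian length prefix (not part of the change):

#include <arpa/inet.h>  // htonl; assumes a POSIX host for this sketch

#include <cstdint>
#include <cstring>
#include <vector>

// Frames `payload` the way Flush() does: a 4-byte big-endian payload length,
// followed by the payload bytes themselves.
static std::vector<char> frame_chunk(const std::vector<char>& payload) {
    std::vector<char> chunk(sizeof(uint32_t) + payload.size());
    const uint32_t be_size = htonl(static_cast<uint32_t>(payload.size()));
    std::memcpy(chunk.data(), &be_size, sizeof(be_size));
    if (!payload.empty()) {
        std::memcpy(chunk.data() + sizeof(be_size), payload.data(), payload.size());
    }
    return chunk;
}

// The server flushes once the pending payload exceeds 31 * 4096 bytes, or when a
// response is marked flush=true (for example, a block that was just reported missing).
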
@@ -443,7 +530,7 @@ bool IncrementalServer::ServingComplete(std::optional<TimePoint> startTime, int
    D("Streaming completed.\n"
      "Misses: %d, of those unique: %d; sent compressed: %d, uncompressed: "
      "%d, mb: %.3f\n"
      "Total time taken: %.3fms\n",
      "Total time taken: %.3fms",
      missesCount, missesSent, compressed_, uncompressed_, sentSize_ / 1024.0 / 1024.0,
      duration_cast<microseconds>(endTime - (startTime ? *startTime : endTime)).count() / 1000.0);
    return true;

@@ -510,9 +597,21 @@ bool IncrementalServer::Serve() {
                        fileId, blockIdx);
                    break;
                }
                // fprintf(stderr, "\treading file %d block %04d\n", (int)fileId,
                // (int)blockIdx);
                if (auto res = SendBlock(fileId, blockIdx, true); res == SendResult::Error) {

                if (VLOG_IS_ON(INCREMENTAL)) {
                    auto& file = files_[fileId];
                    auto posP = std::find(file.PriorityBlocks().begin(),
                                          file.PriorityBlocks().end(), blockIdx);
                    D("\tMISSING BLOCK: reading file %d block %04d (in priority: %d of %d)",
                      (int)fileId, (int)blockIdx,
                      posP == file.PriorityBlocks().end()
                              ? -1
                              : int(posP - file.PriorityBlocks().begin()),
                      int(file.PriorityBlocks().size()));
                }

                if (auto res = SendDataBlock(fileId, blockIdx, true);
                    res == SendResult::Error) {
                    fprintf(stderr, "Failed to send block %" PRId32 ".\n", blockIdx);
                } else if (res == SendResult::Sent) {
                    ++missesSent;

@@ -536,7 +635,7 @@ bool IncrementalServer::Serve() {
                        fileId);
                    break;
                }
                D("Received prefetch request for file_id %" PRId16 ".\n", fileId);
                D("Received prefetch request for file_id %" PRId16 ".", fileId);
                prefetches_.emplace_back(files_[fileId]);
                break;
            }
|
|||
}
|
||||
}
|
||||
|
||||
static std::pair<unique_fd, int64_t> open_fd(const char* filepath) {
|
||||
struct stat st;
|
||||
if (stat(filepath, &st)) {
|
||||
error_exit("inc-server: failed to stat input file '%s'.", filepath);
|
||||
}
|
||||
|
||||
unique_fd fd(adb_open(filepath, O_RDONLY));
|
||||
if (fd < 0) {
|
||||
error_exit("inc-server: failed to open file '%s'.", filepath);
|
||||
}
|
||||
|
||||
return {std::move(fd), st.st_size};
|
||||
}
|
||||
|
||||
static std::pair<unique_fd, int64_t> open_signature(int64_t file_size, const char* filepath) {
|
||||
std::string signature_file(filepath);
|
||||
signature_file += IDSIG;
|
||||
|
||||
unique_fd fd(adb_open(signature_file.c_str(), O_RDONLY));
|
||||
if (fd < 0) {
|
||||
error_exit("inc-server: failed to open file '%s'.", signature_file.c_str());
|
||||
}
|
||||
|
||||
auto [tree_offset, tree_size] = skip_id_sig_headers(fd);
|
||||
if (auto expected = verity_tree_size_for_file(file_size); tree_size != expected) {
|
||||
error_exit("Verity tree size mismatch in signature file: %s [was %lld, expected %lld].\n",
|
||||
signature_file.c_str(), (long long)tree_size, (long long)expected);
|
||||
}
|
||||
|
||||
int32_t data_block_count = numBytesToNumBlocks(file_size);
|
||||
int32_t leaf_nodes_count = (data_block_count + kHashesPerBlock - 1) / kHashesPerBlock;
|
||||
D("Verity tree loaded: %s, tree size: %d (%d blocks, %d leafs)", signature_file.c_str(),
|
||||
int(tree_size), int(numBytesToNumBlocks(tree_size)), int(leaf_nodes_count));
|
||||
|
||||
return {std::move(fd), tree_offset};
|
||||
}
|
||||
|
||||
bool serve(int connection_fd, int output_fd, int argc, const char** argv) {
|
||||
auto connection_ufd = unique_fd(connection_fd);
|
||||
auto output_ufd = unique_fd(output_fd);
|
||||
|
@@ -563,17 +699,11 @@ bool serve(int connection_fd, int output_fd, int argc, const char** argv) {
    for (int i = 0; i < argc; ++i) {
        auto filepath = argv[i];

        struct stat st;
        if (stat(filepath, &st)) {
            fprintf(stderr, "Failed to stat input file %s. Abort.\n", filepath);
            return {};
        }
        auto [file_fd, file_size] = open_fd(filepath);
        auto [sign_fd, sign_offset] = open_signature(file_size, filepath);

        unique_fd fd(adb_open(filepath, O_RDONLY));
        if (fd < 0) {
            error_exit("inc-server: failed to open file '%s'.", filepath);
        }
        files.emplace_back(filepath, i, st.st_size, std::move(fd));
        files.emplace_back(filepath, i, file_size, std::move(file_fd), sign_offset,
                           std::move(sign_fd));
    }

    IncrementalServer server(std::move(connection_ufd), std::move(output_ufd), std::move(files));

@@ -18,6 +18,7 @@
#include "incremental_utils.h"

#include <android-base/endian.h>
#include <android-base/mapped_file.h>
#include <android-base/strings.h>
#include <ziparchive/zip_archive.h>

@@ -28,19 +29,98 @@
#include <numeric>
#include <unordered_set>

#include "adb_io.h"
#include "adb_trace.h"
#include "sysdeps.h"

using namespace std::literals;

static constexpr int kBlockSize = 4096;
namespace incremental {

static constexpr inline int32_t offsetToBlockIndex(int64_t offset) {
    return (offset & ~(kBlockSize - 1)) >> 12;
}

Size verity_tree_blocks_for_file(Size fileSize) {
    if (fileSize == 0) {
        return 0;
    }

    constexpr int hash_per_block = kBlockSize / kDigestSize;

    Size total_tree_block_count = 0;

    auto block_count = 1 + (fileSize - 1) / kBlockSize;
    auto hash_block_count = block_count;
    for (auto i = 0; hash_block_count > 1; i++) {
        hash_block_count = (hash_block_count + hash_per_block - 1) / hash_per_block;
        total_tree_block_count += hash_block_count;
    }
    return total_tree_block_count;
}

Size verity_tree_size_for_file(Size fileSize) {
    return verity_tree_blocks_for_file(fileSize) * kBlockSize;
}

static inline int32_t read_int32(borrowed_fd fd) {
    int32_t result;
    return ReadFdExactly(fd, &result, sizeof(result)) ? result : -1;
}

static inline int32_t skip_int(borrowed_fd fd) {
    return adb_lseek(fd, 4, SEEK_CUR);
}

static inline void append_int(borrowed_fd fd, std::vector<char>* bytes) {
    int32_t le_val = read_int32(fd);
    auto old_size = bytes->size();
    bytes->resize(old_size + sizeof(le_val));
    memcpy(bytes->data() + old_size, &le_val, sizeof(le_val));
}

static inline void append_bytes_with_size(borrowed_fd fd, std::vector<char>* bytes) {
    int32_t le_size = read_int32(fd);
    if (le_size < 0) {
        return;
    }
    int32_t size = int32_t(le32toh(le_size));
    auto old_size = bytes->size();
    bytes->resize(old_size + sizeof(le_size) + size);
    memcpy(bytes->data() + old_size, &le_size, sizeof(le_size));
    ReadFdExactly(fd, bytes->data() + old_size + sizeof(le_size), size);
}

static inline int32_t skip_bytes_with_size(borrowed_fd fd) {
    int32_t le_size = read_int32(fd);
    if (le_size < 0) {
        return -1;
    }
    int32_t size = int32_t(le32toh(le_size));
    return (int32_t)adb_lseek(fd, size, SEEK_CUR);
}

std::pair<std::vector<char>, int32_t> read_id_sig_headers(borrowed_fd fd) {
    std::vector<char> result;
    append_int(fd, &result);              // version
    append_bytes_with_size(fd, &result);  // hashingInfo
    append_bytes_with_size(fd, &result);  // signingInfo
    auto le_tree_size = read_int32(fd);
    auto tree_size = int32_t(le32toh(le_tree_size));  // size of the verity tree
    return {std::move(result), tree_size};
}

std::pair<off64_t, ssize_t> skip_id_sig_headers(borrowed_fd fd) {
    skip_int(fd);                            // version
    skip_bytes_with_size(fd);                // hashingInfo
    auto offset = skip_bytes_with_size(fd);  // signingInfo
    auto le_tree_size = read_int32(fd);
    auto tree_size = int32_t(le32toh(le_tree_size));  // size of the verity tree
    return {offset + sizeof(le_tree_size), tree_size};
}

template <class T>
T valueAt(int fd, off64_t offset) {
static T valueAt(borrowed_fd fd, off64_t offset) {
    T t;
    memset(&t, 0, sizeof(T));
    if (adb_pread(fd, &t, sizeof(T), offset) != sizeof(T)) {
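
verity_tree_blocks_for_file() sums one level of hash blocks at a time until a single root block remains, and offsetToBlockIndex() is a masked divide by the 4 KiB block size. A standalone copy of the arithmetic with an example file size (the 100 MiB figure is only an illustration; not part of the change):

#include <cstdint>
#include <cstdio>

constexpr int kBlockSize = 4096;
constexpr int kDigestSize = 32;  // SHA-256

// Same level-by-level summation as verity_tree_blocks_for_file().
constexpr int64_t tree_blocks_for_file(int64_t file_size) {
    if (file_size == 0) return 0;
    constexpr int hash_per_block = kBlockSize / kDigestSize;  // 128
    int64_t total = 0;
    int64_t hash_block_count = 1 + (file_size - 1) / kBlockSize;
    while (hash_block_count > 1) {
        hash_block_count = (hash_block_count + hash_per_block - 1) / hash_per_block;
        total += hash_block_count;
    }
    return total;
}

constexpr int32_t offset_to_block_index(int64_t offset) {
    return static_cast<int32_t>((offset & ~(kBlockSize - 1)) >> 12);  // i.e. offset / 4096
}

// Example: a 100 MiB file has 25,600 data blocks -> 200 + 2 + 1 = 203 tree blocks,
// i.e. a 203 * 4096 = 831,488-byte verity tree following the .idsig headers.
static_assert(tree_blocks_for_file(100LL * 1024 * 1024) == 203);
static_assert(offset_to_block_index(8192) == 2);

int main() {
    std::printf("tree bytes: %lld\n",
                (long long)(kBlockSize * tree_blocks_for_file(100LL * 1024 * 1024)));
    return 0;
}
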
@@ -68,7 +148,7 @@ static void unduplicate(std::vector<T>& v) {
            v.end());
}

static off64_t CentralDirOffset(int fd, int64_t fileSize) {
static off64_t CentralDirOffset(borrowed_fd fd, Size fileSize) {
    static constexpr int kZipEocdRecMinSize = 22;
    static constexpr int32_t kZipEocdRecSig = 0x06054b50;
    static constexpr int kZipEocdCentralDirSizeFieldOffset = 12;

@@ -103,7 +183,7 @@ static off64_t CentralDirOffset(int fd, int64_t fileSize) {
}

// Does not support APKs larger than 4GB
static off64_t SignerBlockOffset(int fd, int64_t fileSize) {
static off64_t SignerBlockOffset(borrowed_fd fd, Size fileSize) {
    static constexpr int kApkSigBlockMinSize = 32;
    static constexpr int kApkSigBlockFooterSize = 24;
    static constexpr int64_t APK_SIG_BLOCK_MAGIC_HI = 0x3234206b636f6c42l;

@@ -132,7 +212,7 @@ static off64_t SignerBlockOffset(int fd, int64_t fileSize) {
    return signerBlockOffset;
}

static std::vector<int32_t> ZipPriorityBlocks(off64_t signerBlockOffset, int64_t fileSize) {
static std::vector<int32_t> ZipPriorityBlocks(off64_t signerBlockOffset, Size fileSize) {
    int32_t signerBlockIndex = offsetToBlockIndex(signerBlockOffset);
    int32_t lastBlockIndex = offsetToBlockIndex(fileSize);
    const auto numPriorityBlocks = lastBlockIndex - signerBlockIndex + 1;

@@ -160,7 +240,7 @@ static std::vector<int32_t> ZipPriorityBlocks(off64_t signerBlockOffset, int64_t
    return zipPriorityBlocks;
}

[[maybe_unused]] static ZipArchiveHandle openZipArchiveFd(int fd) {
[[maybe_unused]] static ZipArchiveHandle openZipArchiveFd(borrowed_fd fd) {
    bool transferFdOwnership = false;
#ifdef _WIN32
    //

@@ -179,20 +259,22 @@
        D("%s failed at DuplicateHandle: %d", __func__, (int)::GetLastError());
        return {};
    }
    fd = _open_osfhandle((intptr_t)dupedHandle, _O_RDONLY | _O_BINARY);
    if (fd < 0) {
    int osfd = _open_osfhandle((intptr_t)dupedHandle, _O_RDONLY | _O_BINARY);
    if (osfd < 0) {
        D("%s failed at _open_osfhandle: %d", __func__, errno);
        ::CloseHandle(handle);
        return {};
    }
    transferFdOwnership = true;
#else
    int osfd = fd.get();
#endif
    ZipArchiveHandle zip;
    if (OpenArchiveFd(fd, "apk_fd", &zip, transferFdOwnership) != 0) {
    if (OpenArchiveFd(osfd, "apk_fd", &zip, transferFdOwnership) != 0) {
        D("%s failed at OpenArchiveFd: %d", __func__, errno);
#ifdef _WIN32
        // "_close()" is a secret WinCRT name for the regular close() function.
        _close(fd);
        _close(osfd);
#endif
        return {};
    }

@@ -200,7 +282,7 @@
}

static std::pair<ZipArchiveHandle, std::unique_ptr<android::base::MappedFile>> openZipArchive(
        int fd, int64_t fileSize) {
        borrowed_fd fd, Size fileSize) {
#ifndef __LP64__
    if (fileSize >= INT_MAX) {
        return {openZipArchiveFd(fd), nullptr};

@@ -220,9 +302,10 @@
    return {zip, std::move(mapping)};
}

static std::vector<int32_t> InstallationPriorityBlocks(int fd, int64_t fileSize) {
static std::vector<int32_t> InstallationPriorityBlocks(borrowed_fd fd, Size fileSize) {
    static constexpr std::array<std::string_view, 3> additional_matches = {
            "resources.arsc"sv, "AndroidManifest.xml"sv, "classes.dex"sv};

    auto [zip, _] = openZipArchive(fd, fileSize);
    if (!zip) {
        return {};

@@ -249,8 +332,9 @@ static std::vector<int32_t> InstallationPriorityBlocks(int fd, int64_t fileSize)
        if (entryName == "classes.dex"sv) {
            // Only the head is needed for installation
            int32_t startBlockIndex = offsetToBlockIndex(entry.offset);
            appendBlocks(startBlockIndex, 1, &installationPriorityBlocks);
            D("\tadding to priority blocks: '%.*s' 1", (int)entryName.size(), entryName.data());
            appendBlocks(startBlockIndex, 2, &installationPriorityBlocks);
            D("\tadding to priority blocks: '%.*s' (%d)", (int)entryName.size(), entryName.data(),
              2);
        } else {
            // Full entries are needed for installation
            off64_t entryStartOffset = entry.offset;

@@ -273,8 +357,8 @@ static std::vector<int32_t> InstallationPriorityBlocks(int fd, int64_t fileSize)
    return installationPriorityBlocks;
}

namespace incremental {
std::vector<int32_t> PriorityBlocksForFile(const std::string& filepath, int fd, int64_t fileSize) {
std::vector<int32_t> PriorityBlocksForFile(const std::string& filepath, borrowed_fd fd,
                                           Size fileSize) {
    if (!android::base::EndsWithIgnoreCase(filepath, ".apk")) {
        return {};
    }

@@ -291,4 +375,5 @@ std::vector<int32_t> PriorityBlocksForFile(const std::string& filepath, int fd,
    unduplicate(priorityBlocks);
    return priorityBlocks;
}

} // namespace incremental

@@ -16,11 +16,31 @@
#pragma once

#include <stdint.h>
#include "adb_unique_fd.h"

#include <string>
#include <string_view>
#include <utility>
#include <vector>

#include <stdint.h>

namespace incremental {
std::vector<int32_t> PriorityBlocksForFile(const std::string& filepath, int fd, int64_t fileSize);
} // namespace incremental

using Size = int64_t;
constexpr int kBlockSize = 4096;
constexpr int kSha256DigestSize = 32;
constexpr int kDigestSize = kSha256DigestSize;

constexpr std::string_view IDSIG = ".idsig";

std::vector<int32_t> PriorityBlocksForFile(const std::string& filepath, borrowed_fd fd,
                                           Size fileSize);

Size verity_tree_blocks_for_file(Size fileSize);
Size verity_tree_size_for_file(Size fileSize);

std::pair<std::vector<char>, int32_t> read_id_sig_headers(borrowed_fd fd);
std::pair<off64_t, ssize_t> skip_id_sig_headers(borrowed_fd fd);

} // namespace incremental