2014-08-15 23:31:52 +02:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2014 The Android Open Source Project
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <ctype.h>
|
|
|
|
#include <errno.h>
|
2014-12-09 17:39:47 +01:00
|
|
|
#include <dirent.h>
|
2014-08-15 23:31:52 +02:00
|
|
|
#include <fcntl.h>
|
2017-03-30 08:57:47 +02:00
|
|
|
#include <inttypes.h>
|
2015-06-24 08:23:33 +02:00
|
|
|
#include <linux/fs.h>
|
2014-08-15 23:31:52 +02:00
|
|
|
#include <pthread.h>
|
|
|
|
#include <stdarg.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
2014-12-09 17:39:47 +01:00
|
|
|
#include <sys/stat.h>
|
2014-08-15 23:31:52 +02:00
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/wait.h>
|
|
|
|
#include <sys/ioctl.h>
|
|
|
|
#include <time.h>
|
|
|
|
#include <unistd.h>
|
2015-06-25 11:25:36 +02:00
|
|
|
#include <fec/io.h>
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-16 01:39:01 +01:00
|
|
|
#include <functional>
|
2017-12-05 20:04:17 +01:00
|
|
|
#include <limits>
|
2015-08-06 00:20:27 +02:00
|
|
|
#include <memory>
|
|
|
|
#include <string>
|
2016-09-09 05:10:11 +02:00
|
|
|
#include <unordered_map>
|
2015-08-28 01:41:21 +02:00
|
|
|
#include <vector>
|
2015-08-06 00:20:27 +02:00
|
|
|
|
2017-12-05 20:04:17 +01:00
|
|
|
#include <android-base/file.h>
|
2016-11-23 01:29:50 +01:00
|
|
|
#include <android-base/logging.h>
|
2015-12-05 00:30:20 +01:00
|
|
|
#include <android-base/parseint.h>
|
|
|
|
#include <android-base/strings.h>
|
2016-03-23 04:19:22 +01:00
|
|
|
#include <android-base/unique_fd.h>
|
2016-12-28 23:44:05 +01:00
|
|
|
#include <applypatch/applypatch.h>
|
2017-06-30 02:04:21 +02:00
|
|
|
#include <brotli/decode.h>
|
2016-12-28 23:44:05 +01:00
|
|
|
#include <openssl/sha.h>
|
2017-03-22 00:24:57 +01:00
|
|
|
#include <private/android_filesystem_config.h>
|
2016-09-09 05:10:11 +02:00
|
|
|
#include <ziparchive/zip_archive.h>
|
2015-08-06 00:20:27 +02:00
|
|
|
|
2014-08-15 23:31:52 +02:00
|
|
|
#include "edify/expr.h"
|
2017-09-29 06:29:11 +02:00
|
|
|
#include "otafault/ota_io.h"
|
2018-02-28 00:56:11 +01:00
|
|
|
#include "otautil/cache_location.h"
|
2017-10-06 16:43:41 +02:00
|
|
|
#include "otautil/error_code.h"
|
2017-09-29 23:39:33 +02:00
|
|
|
#include "otautil/print_sha1.h"
|
|
|
|
#include "otautil/rangeset.h"
|
2017-03-26 22:36:49 +02:00
|
|
|
#include "updater/install.h"
|
2016-10-11 00:48:37 +02:00
|
|
|
#include "updater/updater.h"
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2015-06-10 17:58:12 +02:00
|
|
|
// Set this to 0 to interpret 'erase' transfers to mean do a
|
|
|
|
// BLKDISCARD ioctl (the normal behavior). Set to 1 to interpret
|
|
|
|
// erase to mean fill the region with zeroes.
|
|
|
|
#define DEBUG_ERASE 0
|
|
|
|
|
2016-12-28 23:44:05 +01:00
|
|
|
static constexpr size_t BLOCKSIZE = 4096;
|
|
|
|
static constexpr mode_t STASH_DIRECTORY_MODE = 0700;
|
|
|
|
static constexpr mode_t STASH_FILE_MODE = 0600;
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2016-04-30 20:49:59 +02:00
|
|
|
static CauseCode failure_type = kNoCause;
|
2016-05-31 18:29:49 +02:00
|
|
|
static bool is_retry = false;
|
2016-09-09 05:10:11 +02:00
|
|
|
static std::unordered_map<std::string, RangeSet> stash_map;
|
2016-03-23 02:08:12 +01:00
|
|
|
|
2017-12-05 20:04:17 +01:00
|
|
|
static void DeleteLastCommandFile() {
|
2018-02-28 00:56:11 +01:00
|
|
|
std::string last_command_file = CacheLocation::location().last_command_file();
|
2017-12-05 20:04:17 +01:00
|
|
|
if (unlink(last_command_file.c_str()) == -1 && errno != ENOENT) {
|
|
|
|
PLOG(ERROR) << "Failed to unlink: " << last_command_file;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Parse the last command index of the last update and save the result to |last_command_index|.
// Return true if we successfully read the index.
//
// The file layout (written by UpdateLastCommandIndex) is exactly two lines:
//   <last_command_index>\n<command_string>
// A missing file is a normal condition (first run / nothing saved) and returns false without
// logging an error; any other failure (open/read/format/parse) logs and returns false.
static bool ParseLastCommandFile(int* last_command_index) {
  std::string last_command_file = CacheLocation::location().last_command_file();
  android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(last_command_file.c_str(), O_RDONLY)));
  if (fd == -1) {
    if (errno != ENOENT) {
      PLOG(ERROR) << "Failed to open " << last_command_file;
      return false;
    }

    LOG(INFO) << last_command_file << " doesn't exist.";
    return false;
  }

  // Now that the last_command file exists, parse the last command index of previous update.
  std::string content;
  if (!android::base::ReadFdToString(fd.get(), &content)) {
    LOG(ERROR) << "Failed to read: " << last_command_file;
    return false;
  }

  std::vector<std::string> lines = android::base::Split(android::base::Trim(content), "\n");
  if (lines.size() != 2) {
    LOG(ERROR) << "Unexpected line counts in last command file: " << content;
    return false;
  }

  if (!android::base::ParseInt(lines[0], last_command_index)) {
    LOG(ERROR) << "Failed to parse integer in: " << lines[0];
    return false;
  }

  return true;
}
|
|
|
|
|
|
|
|
// Update the last command index in the last_command_file if the current command writes to the
|
|
|
|
// stash either explicitly or implicitly.
|
|
|
|
static bool UpdateLastCommandIndex(int command_index, const std::string& command_string) {
|
2018-02-28 00:56:11 +01:00
|
|
|
std::string last_command_file = CacheLocation::location().last_command_file();
|
2017-12-05 20:04:17 +01:00
|
|
|
std::string last_command_tmp = last_command_file + ".tmp";
|
|
|
|
std::string content = std::to_string(command_index) + "\n" + command_string;
|
|
|
|
android::base::unique_fd wfd(
|
|
|
|
TEMP_FAILURE_RETRY(open(last_command_tmp.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0660)));
|
|
|
|
if (wfd == -1 || !android::base::WriteStringToFd(content, wfd)) {
|
|
|
|
PLOG(ERROR) << "Failed to update last command";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fsync(wfd) == -1) {
|
|
|
|
PLOG(ERROR) << "Failed to fsync " << last_command_tmp;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (chown(last_command_tmp.c_str(), AID_SYSTEM, AID_SYSTEM) == -1) {
|
|
|
|
PLOG(ERROR) << "Failed to change owner for " << last_command_tmp;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rename(last_command_tmp.c_str(), last_command_file.c_str()) == -1) {
|
|
|
|
PLOG(ERROR) << "Failed to rename" << last_command_tmp;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string last_command_dir = android::base::Dirname(last_command_file);
|
|
|
|
android::base::unique_fd dfd(
|
|
|
|
TEMP_FAILURE_RETRY(ota_open(last_command_dir.c_str(), O_RDONLY | O_DIRECTORY)));
|
|
|
|
if (dfd == -1) {
|
|
|
|
PLOG(ERROR) << "Failed to open " << last_command_dir;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fsync(dfd) == -1) {
|
|
|
|
PLOG(ERROR) << "Failed to fsync " << last_command_dir;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-12-09 17:39:47 +01:00
|
|
|
static int read_all(int fd, uint8_t* data, size_t size) {
|
2014-08-15 23:31:52 +02:00
|
|
|
size_t so_far = 0;
|
|
|
|
while (so_far < size) {
|
2015-12-16 01:04:53 +01:00
|
|
|
ssize_t r = TEMP_FAILURE_RETRY(ota_read(fd, data+so_far, size-so_far));
|
2015-04-29 02:24:24 +02:00
|
|
|
if (r == -1) {
|
2016-04-30 20:49:59 +02:00
|
|
|
failure_type = kFreadFailure;
|
2016-11-23 01:29:50 +01:00
|
|
|
PLOG(ERROR) << "read failed";
|
2014-12-09 17:39:47 +01:00
|
|
|
return -1;
|
2016-09-01 03:06:33 +02:00
|
|
|
} else if (r == 0) {
|
|
|
|
failure_type = kFreadFailure;
|
2016-11-23 01:29:50 +01:00
|
|
|
LOG(ERROR) << "read reached unexpected EOF.";
|
2016-09-01 03:06:33 +02:00
|
|
|
return -1;
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|
2015-04-29 02:24:24 +02:00
|
|
|
so_far += r;
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
return 0;
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
// Convenience overload: reads |size| bytes into the front of |buffer|.
// NOTE(review): no bounds check here — the caller must guarantee buffer.size() >= size.
static int read_all(int fd, std::vector<uint8_t>& buffer, size_t size) {
  return read_all(fd, buffer.data(), size);
}
|
|
|
|
|
2014-12-09 17:39:47 +01:00
|
|
|
static int write_all(int fd, const uint8_t* data, size_t size) {
|
2014-08-15 23:31:52 +02:00
|
|
|
size_t written = 0;
|
|
|
|
while (written < size) {
|
2015-12-16 01:04:53 +01:00
|
|
|
ssize_t w = TEMP_FAILURE_RETRY(ota_write(fd, data+written, size-written));
|
2015-04-29 02:24:24 +02:00
|
|
|
if (w == -1) {
|
2016-04-30 20:49:59 +02:00
|
|
|
failure_type = kFwriteFailure;
|
2016-11-23 01:29:50 +01:00
|
|
|
PLOG(ERROR) << "write failed";
|
2014-12-09 17:39:47 +01:00
|
|
|
return -1;
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|
2015-04-29 02:24:24 +02:00
|
|
|
written += w;
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
|
|
|
|
return 0;
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
// Convenience overload: writes the first |size| bytes of |buffer|.
// NOTE(review): no bounds check here — the caller must guarantee buffer.size() >= size.
static int write_all(int fd, const std::vector<uint8_t>& buffer, size_t size) {
  return write_all(fd, buffer.data(), size);
}
|
|
|
|
|
2016-05-31 18:29:49 +02:00
|
|
|
static bool discard_blocks(int fd, off64_t offset, uint64_t size) {
|
2017-03-31 01:57:29 +02:00
|
|
|
// Don't discard blocks unless the update is a retry run.
|
|
|
|
if (!is_retry) {
|
2016-05-31 18:29:49 +02:00
|
|
|
return true;
|
2017-03-31 01:57:29 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t args[2] = { static_cast<uint64_t>(offset), size };
|
|
|
|
if (ioctl(fd, BLKDISCARD, &args) == -1) {
|
|
|
|
PLOG(ERROR) << "BLKDISCARD ioctl failed";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
2016-05-31 18:29:49 +02:00
|
|
|
}
|
|
|
|
|
2015-04-29 02:24:24 +02:00
|
|
|
static bool check_lseek(int fd, off64_t offset, int whence) {
|
|
|
|
off64_t rc = TEMP_FAILURE_RETRY(lseek64(fd, offset, whence));
|
|
|
|
if (rc == -1) {
|
2016-04-30 20:49:59 +02:00
|
|
|
failure_type = kLseekFailure;
|
2016-11-23 01:29:50 +01:00
|
|
|
PLOG(ERROR) << "lseek64 failed";
|
2015-04-29 02:24:24 +02:00
|
|
|
return false;
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|
2015-04-29 02:24:24 +02:00
|
|
|
return true;
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
// Ensures |buffer| can hold at least |size| bytes. Grows only — a buffer that is
// already large enough is reused as-is (its contents and size are untouched).
static void allocate(size_t size, std::vector<uint8_t>& buffer) {
  if (buffer.size() < size) {
    buffer.resize(size);
  }
}
|
|
|
|
|
2017-03-26 23:03:52 +02:00
|
|
|
/**
 * RangeSinkWriter reads data from the given FD, and writes them to the destination specified by the
 * given RangeSet.
 *
 * The writer keeps a cursor (next_range_, current_range_left_) over the target RangeSet and
 * advances it as Write() is called, lseek'ing the output fd to each new range and (on retry
 * runs) discarding the blocks first. The RangeSet is held by reference, so it must outlive
 * this object.
 */
class RangeSinkWriter {
 public:
  RangeSinkWriter(int fd, const RangeSet& tgt)
      : fd_(fd),
        tgt_(tgt),
        next_range_(0),
        current_range_left_(0),
        bytes_written_(0) {
    // An empty target RangeSet would make the writer permanently "finished".
    CHECK_NE(tgt.size(), static_cast<size_t>(0));
  };

  // True once every range in tgt_ has been fully written.
  bool Finished() const {
    return next_range_ == tgt_.size() && current_range_left_ == 0;
  }

  // Number of bytes this writer can still accept before overrunning the target ranges.
  size_t AvailableSpace() const {
    return tgt_.blocks() * BLOCKSIZE - bytes_written_;
  }

  // Return number of bytes written; and 0 indicates a writing failure.
  size_t Write(const uint8_t* data, size_t size) {
    if (Finished()) {
      LOG(ERROR) << "range sink write overrun; can't write " << size << " bytes";
      return 0;
    }

    size_t written = 0;
    while (size > 0) {
      // Move to the next range as needed.
      if (!SeekToOutputRange()) {
        break;
      }

      // Write at most what remains in the current range.
      size_t write_now = size;
      if (current_range_left_ < write_now) {
        write_now = current_range_left_;
      }

      if (write_all(fd_, data, write_now) == -1) {
        break;
      }

      data += write_now;
      size -= write_now;

      current_range_left_ -= write_now;
      written += write_now;
    }

    bytes_written_ += written;
    return written;
  }

  // Total number of bytes successfully written so far.
  size_t BytesWritten() const {
    return bytes_written_;
  }

 private:
  // Set up the output cursor, move to next range if needed.
  bool SeekToOutputRange() {
    // We haven't finished the current range yet.
    if (current_range_left_ != 0) {
      return true;
    }
    // We can't write any more; let the write function return how many bytes have been written
    // so far.
    if (next_range_ >= tgt_.size()) {
      return false;
    }

    const Range& range = tgt_[next_range_];
    off64_t offset = static_cast<off64_t>(range.first) * BLOCKSIZE;
    current_range_left_ = (range.second - range.first) * BLOCKSIZE;
    next_range_++;

    // Discard (retry runs only) and position the fd at the start of the new range; a failure
    // in either step aborts the write.
    if (!discard_blocks(fd_, offset, current_range_left_)) {
      return false;
    }
    if (!check_lseek(fd_, offset, SEEK_SET)) {
      return false;
    }
    return true;
  }

  // The output file descriptor.
  int fd_;
  // The destination ranges for the data.
  const RangeSet& tgt_;
  // The next range that we should write to.
  size_t next_range_;
  // The number of bytes to write before moving to the next range.
  size_t current_range_left_;
  // Total bytes written by the writer.
  size_t bytes_written_;
};
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-26 23:03:52 +02:00
|
|
|
/**
 * All of the data for all the 'new' transfers is contained in one file in the update package,
 * concatenated together in the order in which transfers.list will need it. We want to stream it out
 * of the archive (it's compressed) without writing it to a temp file, but we can't write each
 * section until it's that transfer's turn to go.
 *
 * To achieve this, we expand the new data from the archive in a background thread, and block that
 * threads 'receive uncompressed data' function until the main thread has reached a point where we
 * want some new data to be written. We signal the background thread with the destination for the
 * data and block the main thread, waiting for the background thread to complete writing that
 * section. Then it signals the main thread to wake up and goes back to blocking waiting for a
 * transfer.
 *
 * NewThreadInfo is the struct used to pass information back and forth between the two threads. When
 * the main thread wants some data written, it sets writer to the destination location and signals
 * the condition. When the background thread is done writing, it clears writer and signals the
 * condition again.
 */
struct NewThreadInfo {
  // Archive and entry holding the concatenated new-block data.
  ZipArchiveHandle za;
  ZipEntry entry;
  // Selects which receiver callback decodes the entry (brotli vs. raw).
  bool brotli_compressed;

  // Destination for the currently-wanted section; nullptr means "no data wanted right now".
  // Guarded by mu/cv as described above.
  std::unique_ptr<RangeSinkWriter> writer;
  // Streaming decoder state used by receive_brotli_new_data.
  BrotliDecoderState* brotli_decoder_state;
  // Set to false when the background receiver exits, so waiters don't block forever.
  bool receiver_available;

  // Protect writer/receiver_available and signal hand-offs between the two threads.
  pthread_mutex_t mu;
  pthread_cond_t cv;
};
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2016-09-09 05:10:11 +02:00
|
|
|
// Receiver callback for uncompressed "new" data. Runs on the background unzip thread;
// blocks until the main thread installs a writer in |nti|, then streams as much of
// |data| as that writer wants. Returns false if the main thread signalled an error
// (receiver_available cleared) or a write fails; true once all |size| bytes are consumed.
static bool receive_new_data(const uint8_t* data, size_t size, void* cookie) {
  NewThreadInfo* nti = static_cast<NewThreadInfo*>(cookie);

  while (size > 0) {
    // Wait for nti->writer to be non-null, indicating some of this data is wanted.
    pthread_mutex_lock(&nti->mu);
    while (nti->writer == nullptr) {
      // End the new data receiver if we encounter an error when performing block image update.
      if (!nti->receiver_available) {
        pthread_mutex_unlock(&nti->mu);
        return false;
      }
      pthread_cond_wait(&nti->cv, &nti->mu);
    }
    pthread_mutex_unlock(&nti->mu);

    // At this point nti->writer is set, and we own it. The main thread is waiting for it to
    // disappear from nti.
    size_t write_now = std::min(size, nti->writer->AvailableSpace());
    if (nti->writer->Write(data, write_now) != write_now) {
      LOG(ERROR) << "Failed to write " << write_now << " bytes.";
      return false;
    }

    data += write_now;
    size -= write_now;

    if (nti->writer->Finished()) {
      // We have written all the bytes desired by this writer.

      pthread_mutex_lock(&nti->mu);
      nti->writer = nullptr;
      pthread_cond_broadcast(&nti->cv);
      pthread_mutex_unlock(&nti->mu);
    }
  }

  return true;
}
|
|
|
|
|
|
|
|
// Receiver callback for brotli-compressed "new" data. Same thread handshake as
// receive_new_data, but routes the input through a streaming brotli decoder; the loop
// continues while either undecoded input remains or the decoder has buffered output
// pending. Returns false on a signalled error, decoder error, full output range, or a
// short write; true when everything is consumed and flushed.
static bool receive_brotli_new_data(const uint8_t* data, size_t size, void* cookie) {
  NewThreadInfo* nti = static_cast<NewThreadInfo*>(cookie);

  while (size > 0 || BrotliDecoderHasMoreOutput(nti->brotli_decoder_state)) {
    // Wait for nti->writer to be non-null, indicating some of this data is wanted.
    pthread_mutex_lock(&nti->mu);
    while (nti->writer == nullptr) {
      // End the receiver if we encounter an error when performing block image update.
      if (!nti->receiver_available) {
        pthread_mutex_unlock(&nti->mu);
        return false;
      }
      pthread_cond_wait(&nti->cv, &nti->mu);
    }
    pthread_mutex_unlock(&nti->mu);

    // At this point nti->writer is set, and we own it. The main thread is waiting for it to
    // disappear from nti.

    // Decode at most 32 KiB per iteration, capped by the writer's remaining space.
    size_t buffer_size = std::min<size_t>(32768, nti->writer->AvailableSpace());
    if (buffer_size == 0) {
      LOG(ERROR) << "No space left in output range";
      return false;
    }
    // NOTE(review): runtime-sized stack array (VLA) — a compiler extension, not standard
    // C++; bounded above by 32768 bytes.
    uint8_t buffer[buffer_size];
    size_t available_in = size;
    size_t available_out = buffer_size;
    uint8_t* next_out = buffer;

    // The brotli decoder will update |data|, |available_in|, |next_out| and |available_out|.
    BrotliDecoderResult result = BrotliDecoderDecompressStream(
        nti->brotli_decoder_state, &available_in, &data, &available_out, &next_out, nullptr);

    if (result == BROTLI_DECODER_RESULT_ERROR) {
      LOG(ERROR) << "Decompression failed with "
                 << BrotliDecoderErrorString(BrotliDecoderGetErrorCode(nti->brotli_decoder_state));
      return false;
    }

    LOG(DEBUG) << "bytes to write: " << buffer_size - available_out << ", bytes consumed "
               << size - available_in << ", decoder status " << result;

    size_t write_now = buffer_size - available_out;
    if (nti->writer->Write(buffer, write_now) != write_now) {
      LOG(ERROR) << "Failed to write " << write_now << " bytes.";
      return false;
    }

    // Update the remaining size. The input data ptr is already updated by brotli decoder function.
    size = available_in;

    if (nti->writer->Finished()) {
      // We have written all the bytes desired by this writer.

      pthread_mutex_lock(&nti->mu);
      nti->writer = nullptr;
      pthread_cond_broadcast(&nti->cv);
      pthread_mutex_unlock(&nti->mu);
    }
  }

  return true;
}
|
|
|
|
|
|
|
|
// pthread entry point for the background "new data" thread: streams the zip entry's
// contents through the receiver matching its compression, then tears down the handshake.
static void* unzip_new_data(void* cookie) {
  NewThreadInfo* nti = static_cast<NewThreadInfo*>(cookie);
  if (nti->brotli_compressed) {
    ProcessZipEntryContents(nti->za, &nti->entry, receive_brotli_new_data, nti);
  } else {
    ProcessZipEntryContents(nti->za, &nti->entry, receive_new_data, nti);
  }
  // Mark the receiver as gone; wake the main thread if it is still waiting on a writer,
  // so it doesn't block forever after an error or early exit.
  pthread_mutex_lock(&nti->mu);
  nti->receiver_available = false;
  if (nti->writer != nullptr) {
    pthread_cond_broadcast(&nti->cv);
  }
  pthread_mutex_unlock(&nti->mu);
  return nullptr;
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
static int ReadBlocks(const RangeSet& src, std::vector<uint8_t>& buffer, int fd) {
|
2017-03-31 01:57:29 +02:00
|
|
|
size_t p = 0;
|
|
|
|
for (const auto& range : src) {
|
|
|
|
if (!check_lseek(fd, static_cast<off64_t>(range.first) * BLOCKSIZE, SEEK_SET)) {
|
|
|
|
return -1;
|
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2017-03-31 01:57:29 +02:00
|
|
|
size_t size = (range.second - range.first) * BLOCKSIZE;
|
|
|
|
if (read_all(fd, buffer.data() + p, size) == -1) {
|
|
|
|
return -1;
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
|
|
|
|
2017-03-31 01:57:29 +02:00
|
|
|
p += size;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
static int WriteBlocks(const RangeSet& tgt, const std::vector<uint8_t>& buffer, int fd) {
|
2017-03-26 23:03:52 +02:00
|
|
|
size_t written = 0;
|
2017-03-31 01:57:29 +02:00
|
|
|
for (const auto& range : tgt) {
|
|
|
|
off64_t offset = static_cast<off64_t>(range.first) * BLOCKSIZE;
|
|
|
|
size_t size = (range.second - range.first) * BLOCKSIZE;
|
2017-03-26 23:03:52 +02:00
|
|
|
if (!discard_blocks(fd, offset, size)) {
|
|
|
|
return -1;
|
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2017-03-26 23:03:52 +02:00
|
|
|
if (!check_lseek(fd, offset, SEEK_SET)) {
|
|
|
|
return -1;
|
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2017-03-26 23:03:52 +02:00
|
|
|
if (write_all(fd, buffer.data() + written, size) == -1) {
|
|
|
|
return -1;
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
|
|
|
|
2017-03-26 23:03:52 +02:00
|
|
|
written += size;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
|
|
|
|
2015-12-07 01:56:27 +01:00
|
|
|
// Parameters for transfer list command functions
struct CommandParameters {
  std::vector<std::string> tokens;  // Tokenized form of the current command line.
  size_t cpos;                      // presumably the index of the next token to consume — TODO confirm against command handlers
  int cmdindex;                     // Index of the current command (recorded via UpdateLastCommandIndex).
  const char* cmdname;              // Name of the current command, e.g. "move" / "bsdiff".
  const char* cmdline;              // Raw command line (used in error/hash logging).
  std::string freestash;            // presumably a stash id to free after the command — TODO confirm
  std::string stashbase;            // Base name of the stash directory for this update.
  bool canwrite;                    // presumably false during dry-run/verification phase — TODO confirm
  int createdstash;                 // presumably nonzero if this run created the stash dir — TODO confirm
  android::base::unique_fd fd;      // FD of the partition being updated (owned).
  bool foundwrites;
  bool isunresumable;               // Set when the update can no longer be safely resumed.
  int version;                      // Transfer list version.
  size_t written;                   // Running count of blocks/bytes written — TODO confirm unit
  size_t stashed;                   // Running count of blocks/bytes stashed — TODO confirm unit
  NewThreadInfo nti;                // Shared state with the background new-data thread.
  pthread_t thread;                 // The background new-data thread (unzip_new_data).
  std::vector<uint8_t> buffer;      // Scratch buffer reused across commands (see allocate()).
  uint8_t* patch_start;             // Start of the patch data blob — TODO confirm ownership
  bool target_verified;  // The target blocks have expected contents already.
};
|
|
|
|
|
2017-03-16 00:52:46 +01:00
|
|
|
// Print the hash in hex for corrupted source blocks (excluding the stashed blocks which is
// handled separately).
//
// |buffer| holds the source data as read by the command (consecutive, possibly interleaved
// with stashed blocks); the command tokens are re-parsed here to map each source block
// number to its location in the buffer before printing a per-block SHA-1.
static void PrintHashForCorruptedSourceBlocks(const CommandParameters& params,
                                              const std::vector<uint8_t>& buffer) {
  LOG(INFO) << "unexpected contents of source blocks in cmd:\n" << params.cmdline;
  CHECK(params.tokens[0] == "move" || params.tokens[0] == "bsdiff" ||
        params.tokens[0] == "imgdiff");

  size_t pos = 0;
  // Command example:
  // move <onehash> <tgt_range> <src_blk_count> <src_range> [<loc_range> <stashed_blocks>]
  // bsdiff <offset> <len> <src_hash> <tgt_hash> <tgt_range> <src_blk_count> <src_range>
  // [<loc_range> <stashed_blocks>]
  if (params.tokens[0] == "move") {
    // src_range for move starts at the 4th position.
    if (params.tokens.size() < 5) {
      LOG(ERROR) << "failed to parse source range in cmd:\n" << params.cmdline;
      return;
    }
    pos = 4;
  } else {
    // src_range for diff starts at the 7th position.
    if (params.tokens.size() < 8) {
      LOG(ERROR) << "failed to parse source range in cmd:\n" << params.cmdline;
      return;
    }
    pos = 7;
  }

  // Source blocks in stash only, no work to do.
  if (params.tokens[pos] == "-") {
    return;
  }

  RangeSet src = RangeSet::Parse(params.tokens[pos++]);
  if (!src) {
    LOG(ERROR) << "Failed to parse range in " << params.cmdline;
    return;
  }

  RangeSet locs;
  // If there's no stashed blocks, content in the buffer is consecutive and has the same
  // order as the source blocks.
  if (pos == params.tokens.size()) {
    locs = RangeSet(std::vector<Range>{ Range{ 0, src.blocks() } });
  } else {
    // Otherwise, the next token is the offset of the source blocks in the target range.
    // Example: for the tokens <4,63946,63947,63948,63979> <4,6,7,8,39> <stashed_blocks>;
    // We want to print SHA-1 for the data in buffer[6], buffer[8], buffer[9] ... buffer[38];
    // this corresponds to the 32 src blocks #63946, #63948, #63949 ... #63978.
    locs = RangeSet::Parse(params.tokens[pos++]);
    CHECK_EQ(src.blocks(), locs.blocks());
  }

  LOG(INFO) << "printing hash in hex for " << src.blocks() << " source blocks";
  for (size_t i = 0; i < src.blocks(); i++) {
    size_t block_num = src.GetBlockNumber(i);
    size_t buffer_index = locs.GetBlockNumber(i);
    CHECK_LE((buffer_index + 1) * BLOCKSIZE, buffer.size());

    uint8_t digest[SHA_DIGEST_LENGTH];
    SHA1(buffer.data() + buffer_index * BLOCKSIZE, BLOCKSIZE, digest);
    std::string hexdigest = print_sha1(digest);
    LOG(INFO) << " block number: " << block_num << ", SHA-1: " << hexdigest;
  }
}
|
|
|
|
|
|
|
|
// If the calculated hash for the whole stash doesn't match the stash id, print the SHA-1
|
|
|
|
// in hex for each block.
|
|
|
|
static void PrintHashForCorruptedStashedBlocks(const std::string& id,
|
|
|
|
const std::vector<uint8_t>& buffer,
|
|
|
|
const RangeSet& src) {
|
|
|
|
LOG(INFO) << "printing hash in hex for stash_id: " << id;
|
2017-03-31 01:57:29 +02:00
|
|
|
CHECK_EQ(src.blocks() * BLOCKSIZE, buffer.size());
|
2017-03-16 00:52:46 +01:00
|
|
|
|
2017-03-31 01:57:29 +02:00
|
|
|
for (size_t i = 0; i < src.blocks(); i++) {
|
2017-03-26 22:36:49 +02:00
|
|
|
size_t block_num = src.GetBlockNumber(i);
|
2017-03-16 00:52:46 +01:00
|
|
|
|
|
|
|
uint8_t digest[SHA_DIGEST_LENGTH];
|
|
|
|
SHA1(buffer.data() + i * BLOCKSIZE, BLOCKSIZE, digest);
|
|
|
|
std::string hexdigest = print_sha1(digest);
|
|
|
|
LOG(INFO) << " block number: " << block_num << ", SHA-1: " << hexdigest;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the stash file doesn't exist, read the source blocks this stash contains and print the
|
|
|
|
// SHA-1 for these blocks.
|
|
|
|
static void PrintHashForMissingStashedBlocks(const std::string& id, int fd) {
|
|
|
|
if (stash_map.find(id) == stash_map.end()) {
|
|
|
|
LOG(ERROR) << "No stash saved for id: " << id;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
LOG(INFO) << "print hash in hex for source blocks in missing stash: " << id;
|
|
|
|
const RangeSet& src = stash_map[id];
|
2017-03-31 01:57:29 +02:00
|
|
|
std::vector<uint8_t> buffer(src.blocks() * BLOCKSIZE);
|
2017-03-16 00:52:46 +01:00
|
|
|
if (ReadBlocks(src, buffer, fd) == -1) {
|
|
|
|
LOG(ERROR) << "failed to read source blocks for stash: " << id;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
PrintHashForCorruptedStashedBlocks(id, buffer, src);
|
|
|
|
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
static int VerifyBlocks(const std::string& expected, const std::vector<uint8_t>& buffer,
|
2015-08-28 01:41:21 +02:00
|
|
|
const size_t blocks, bool printerror) {
|
2016-02-04 09:23:21 +01:00
|
|
|
uint8_t digest[SHA_DIGEST_LENGTH];
|
2015-08-28 01:41:21 +02:00
|
|
|
const uint8_t* data = buffer.data();
|
2014-09-08 21:22:09 +02:00
|
|
|
|
2016-02-04 09:23:21 +01:00
|
|
|
SHA1(data, blocks * BLOCKSIZE, digest);
|
2014-09-08 21:22:09 +02:00
|
|
|
|
2015-08-06 00:20:27 +02:00
|
|
|
std::string hexdigest = print_sha1(digest);
|
2014-09-08 21:22:09 +02:00
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
if (hexdigest != expected) {
|
|
|
|
if (printerror) {
|
2016-11-23 01:29:50 +01:00
|
|
|
LOG(ERROR) << "failed to verify blocks (expected " << expected << ", read "
|
|
|
|
<< hexdigest << ")";
|
2015-08-28 01:41:21 +02:00
|
|
|
}
|
|
|
|
return -1;
|
2014-09-08 21:22:09 +02:00
|
|
|
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
return 0;
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
2014-09-08 21:22:09 +02:00
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
static std::string GetStashFileName(const std::string& base, const std::string& id,
|
|
|
|
const std::string& postfix) {
|
2015-08-06 00:20:27 +02:00
|
|
|
if (base.empty()) {
|
|
|
|
return "";
|
2014-09-08 21:22:09 +02:00
|
|
|
}
|
|
|
|
|
2018-02-28 00:56:11 +01:00
|
|
|
std::string fn(CacheLocation::location().stash_directory_base());
|
2015-08-06 00:20:27 +02:00
|
|
|
fn += "/" + base + "/" + id + postfix;
|
2014-12-09 17:39:47 +01:00
|
|
|
|
|
|
|
return fn;
|
2014-09-08 21:22:09 +02:00
|
|
|
}
|
|
|
|
|
2017-03-16 01:39:01 +01:00
|
|
|
// Does a best effort enumeration of stash files. Ignores possible non-file items in the stash
|
|
|
|
// directory and continues despite of errors. Calls the 'callback' function for each file.
|
|
|
|
static void EnumerateStash(const std::string& dirname,
|
|
|
|
const std::function<void(const std::string&)>& callback) {
|
|
|
|
if (dirname.empty()) return;
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-16 01:39:01 +01:00
|
|
|
std::unique_ptr<DIR, decltype(&closedir)> directory(opendir(dirname.c_str()), closedir);
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-16 01:39:01 +01:00
|
|
|
if (directory == nullptr) {
|
|
|
|
if (errno != ENOENT) {
|
|
|
|
PLOG(ERROR) << "opendir \"" << dirname << "\" failed";
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|
2016-12-28 23:44:05 +01:00
|
|
|
return;
|
|
|
|
}
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-16 01:39:01 +01:00
|
|
|
dirent* item;
|
|
|
|
while ((item = readdir(directory.get())) != nullptr) {
|
|
|
|
if (item->d_type != DT_REG) continue;
|
|
|
|
callback(dirname + "/" + item->d_name);
|
2016-12-28 23:44:05 +01:00
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2014-12-09 17:39:47 +01:00
|
|
|
// Deletes the stash directory and all files in it. Assumes that it only
|
|
|
|
// contains files. There is nothing we can do about unlikely, but possible
|
|
|
|
// errors, so they are merely logged.
|
2017-03-16 01:39:01 +01:00
|
|
|
static void DeleteFile(const std::string& fn) {
|
|
|
|
if (fn.empty()) return;
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2017-03-16 01:39:01 +01:00
|
|
|
LOG(INFO) << "deleting " << fn;
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2017-03-16 01:39:01 +01:00
|
|
|
if (unlink(fn.c_str()) == -1 && errno != ENOENT) {
|
|
|
|
PLOG(ERROR) << "unlink \"" << fn << "\" failed";
|
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2015-08-06 00:20:27 +02:00
|
|
|
static void DeleteStash(const std::string& base) {
|
2017-03-16 01:39:01 +01:00
|
|
|
if (base.empty()) return;
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-16 01:39:01 +01:00
|
|
|
LOG(INFO) << "deleting stash " << base;
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-16 01:39:01 +01:00
|
|
|
std::string dirname = GetStashFileName(base, "", "");
|
|
|
|
EnumerateStash(dirname, DeleteFile);
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-16 01:39:01 +01:00
|
|
|
if (rmdir(dirname.c_str()) == -1) {
|
|
|
|
if (errno != ENOENT && errno != ENOTDIR) {
|
|
|
|
PLOG(ERROR) << "rmdir \"" << dirname << "\" failed";
|
2014-08-21 19:47:24 +02:00
|
|
|
}
|
2017-03-16 01:39:01 +01:00
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-23 23:28:20 +01:00
|
|
|
// Loads the stash identified by `id` into `buffer`, returning 0 on success
// and -1 on failure.
//
// In verify mode (params.canwrite == false), if a source RangeSet was recorded
// for this id in stash_map, the data is read back from the source blocks
// themselves rather than from a stash file on /cache. Otherwise the stash file
// <stashbase>/<id> is read. If `verify` is set, the loaded contents are
// checked against `id` (which is the SHA-1 of the expected data); a corrupt
// stash file is deleted so a retry cannot reuse it. If `blocks` is non-null,
// it receives the number of blocks loaded. `printnoent` controls whether a
// missing stash file is logged as an error.
static int LoadStash(CommandParameters& params, const std::string& id, bool verify, size_t* blocks,
                     std::vector<uint8_t>& buffer, bool printnoent) {
  // In verify mode, if source range_set was saved for the given hash, check contents in the source
  // blocks first. If the check fails, search for the stashed files on /cache as usual.
  if (!params.canwrite) {
    if (stash_map.find(id) != stash_map.end()) {
      const RangeSet& src = stash_map[id];
      allocate(src.blocks() * BLOCKSIZE, buffer);

      if (ReadBlocks(src, buffer, params.fd) == -1) {
        LOG(ERROR) << "failed to read source blocks in stash map.";
        return -1;
      }
      if (VerifyBlocks(id, buffer, src.blocks(), true) != 0) {
        LOG(ERROR) << "failed to verify loaded source blocks in stash map.";
        PrintHashForCorruptedStashedBlocks(id, buffer, src);
        return -1;
      }
      return 0;
    }
  }

  // Callers may pass blocks == nullptr when they don't care about the count;
  // point it at a local so the code below can assign unconditionally.
  size_t blockcount = 0;
  if (!blocks) {
    blocks = &blockcount;
  }

  std::string fn = GetStashFileName(params.stashbase, id, "");

  struct stat sb;
  if (stat(fn.c_str(), &sb) == -1) {
    if (errno != ENOENT || printnoent) {
      PLOG(ERROR) << "stat \"" << fn << "\" failed";
      PrintHashForMissingStashedBlocks(id, params.fd);
    }
    return -1;
  }

  LOG(INFO) << " loading " << fn;

  // A stash file must be an exact number of blocks; anything else means it
  // was truncated or corrupted.
  if ((sb.st_size % BLOCKSIZE) != 0) {
    LOG(ERROR) << fn << " size " << sb.st_size << " not multiple of block size " << BLOCKSIZE;
    return -1;
  }

  android::base::unique_fd fd(TEMP_FAILURE_RETRY(ota_open(fn.c_str(), O_RDONLY)));
  if (fd == -1) {
    PLOG(ERROR) << "open \"" << fn << "\" failed";
    return -1;
  }

  allocate(sb.st_size, buffer);

  if (read_all(fd, buffer, sb.st_size) == -1) {
    return -1;
  }

  *blocks = sb.st_size / BLOCKSIZE;

  if (verify && VerifyBlocks(id, buffer, *blocks, true) != 0) {
    LOG(ERROR) << "unexpected contents in " << fn;
    if (stash_map.find(id) == stash_map.end()) {
      LOG(ERROR) << "failed to find source blocks number for stash " << id
                 << " when executing command: " << params.cmdname;
    } else {
      const RangeSet& src = stash_map[id];
      PrintHashForCorruptedStashedBlocks(id, buffer, src);
    }
    // Remove the corrupt stash file so it can't be picked up again.
    DeleteFile(fn);
    return -1;
  }

  return 0;
}
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
static int WriteStash(const std::string& base, const std::string& id, int blocks,
|
2017-03-23 22:43:44 +01:00
|
|
|
std::vector<uint8_t>& buffer, bool checkspace, bool* exists) {
|
2015-08-28 01:41:21 +02:00
|
|
|
if (base.empty()) {
|
2015-08-28 01:41:21 +02:00
|
|
|
return -1;
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
2014-09-08 21:22:09 +02:00
|
|
|
|
2014-12-09 17:39:47 +01:00
|
|
|
if (checkspace && CacheSizeCheck(blocks * BLOCKSIZE) != 0) {
|
2016-11-23 01:29:50 +01:00
|
|
|
LOG(ERROR) << "not enough space to write stash";
|
2015-08-28 01:41:21 +02:00
|
|
|
return -1;
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
std::string fn = GetStashFileName(base, id, ".partial");
|
|
|
|
std::string cn = GetStashFileName(base, id, "");
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2015-04-17 13:50:31 +02:00
|
|
|
if (exists) {
|
2015-08-28 01:41:21 +02:00
|
|
|
struct stat sb;
|
|
|
|
int res = stat(cn.c_str(), &sb);
|
2015-04-17 13:50:31 +02:00
|
|
|
|
|
|
|
if (res == 0) {
|
|
|
|
// The file already exists and since the name is the hash of the contents,
|
|
|
|
// it's safe to assume the contents are identical (accidental hash collisions
|
|
|
|
// are unlikely)
|
2016-11-23 01:29:50 +01:00
|
|
|
LOG(INFO) << " skipping " << blocks << " existing blocks in " << cn;
|
2015-08-28 01:41:21 +02:00
|
|
|
*exists = true;
|
|
|
|
return 0;
|
2015-04-17 13:50:31 +02:00
|
|
|
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
*exists = false;
|
2015-04-17 13:50:31 +02:00
|
|
|
}
|
|
|
|
|
2016-11-23 01:29:50 +01:00
|
|
|
LOG(INFO) << " writing " << blocks << " blocks to " << cn;
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2016-11-23 01:29:50 +01:00
|
|
|
android::base::unique_fd fd(
|
|
|
|
TEMP_FAILURE_RETRY(ota_open(fn.c_str(), O_WRONLY | O_CREAT | O_TRUNC, STASH_FILE_MODE)));
|
2014-12-09 17:39:47 +01:00
|
|
|
if (fd == -1) {
|
2016-11-23 01:29:50 +01:00
|
|
|
PLOG(ERROR) << "failed to create \"" << fn << "\"";
|
2015-08-28 01:41:21 +02:00
|
|
|
return -1;
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-22 00:24:57 +01:00
|
|
|
if (fchown(fd, AID_SYSTEM, AID_SYSTEM) != 0) { // system user
|
|
|
|
PLOG(ERROR) << "failed to chown \"" << fn << "\"";
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2014-12-09 17:39:47 +01:00
|
|
|
if (write_all(fd, buffer, blocks * BLOCKSIZE) == -1) {
|
2015-08-28 01:41:21 +02:00
|
|
|
return -1;
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|
|
|
|
|
2015-12-16 01:04:53 +01:00
|
|
|
if (ota_fsync(fd) == -1) {
|
2016-04-30 20:49:59 +02:00
|
|
|
failure_type = kFsyncFailure;
|
2016-11-23 01:29:50 +01:00
|
|
|
PLOG(ERROR) << "fsync \"" << fn << "\" failed";
|
2015-08-28 01:41:21 +02:00
|
|
|
return -1;
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2015-08-06 00:20:27 +02:00
|
|
|
if (rename(fn.c_str(), cn.c_str()) == -1) {
|
2016-11-23 01:29:50 +01:00
|
|
|
PLOG(ERROR) << "rename(\"" << fn << "\", \"" << cn << "\") failed";
|
2015-08-28 01:41:21 +02:00
|
|
|
return -1;
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
std::string dname = GetStashFileName(base, "", "");
|
2016-03-23 04:19:22 +01:00
|
|
|
android::base::unique_fd dfd(TEMP_FAILURE_RETRY(ota_open(dname.c_str(),
|
|
|
|
O_RDONLY | O_DIRECTORY)));
|
2015-08-01 00:56:44 +02:00
|
|
|
if (dfd == -1) {
|
2016-04-30 20:49:59 +02:00
|
|
|
failure_type = kFileOpenFailure;
|
2016-11-23 01:29:50 +01:00
|
|
|
PLOG(ERROR) << "failed to open \"" << dname << "\" failed";
|
2015-08-28 01:41:21 +02:00
|
|
|
return -1;
|
2015-08-01 00:56:44 +02:00
|
|
|
}
|
|
|
|
|
2015-12-16 01:04:53 +01:00
|
|
|
if (ota_fsync(dfd) == -1) {
|
2016-04-30 20:49:59 +02:00
|
|
|
failure_type = kFsyncFailure;
|
2016-11-23 01:29:50 +01:00
|
|
|
PLOG(ERROR) << "fsync \"" << dname << "\" failed";
|
2015-08-28 01:41:21 +02:00
|
|
|
return -1;
|
2015-08-01 00:56:44 +02:00
|
|
|
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
return 0;
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Creates a directory for storing stash files and checks if the /cache partition
|
|
|
|
// has enough space for the expected number of blocks we need to store. Returns
|
|
|
|
// >0 if we created the directory, zero if it existed already, and <0 on failure.
|
|
|
|
|
2016-12-28 23:44:05 +01:00
|
|
|
// Creates (or adopts) the stash directory for the partition on `blockdev`.
// On success, `base` is set to the SHA-1 of the block device name, which is
// used as the per-partition stash subdirectory. Returns 1 if a new directory
// was created, 0 if an existing directory is reused, and -1 on failure (in
// which case ErrorAbort has been called on `state`).
static int CreateStash(State* state, size_t maxblocks, const std::string& blockdev,
                       std::string& base) {
  if (blockdev.empty()) {
    return -1;
  }

  // Stash directory should be different for each partition to avoid conflicts
  // when updating multiple partitions at the same time, so we use the hash of
  // the block device name as the base directory
  uint8_t digest[SHA_DIGEST_LENGTH];
  SHA1(reinterpret_cast<const uint8_t*>(blockdev.data()), blockdev.size(), digest);
  base = print_sha1(digest);

  std::string dirname = GetStashFileName(base, "", "");
  struct stat sb;
  int res = stat(dirname.c_str(), &sb);
  size_t max_stash_size = maxblocks * BLOCKSIZE;

  if (res == -1 && errno != ENOENT) {
    // stat failed for a reason other than "doesn't exist": abort.
    ErrorAbort(state, kStashCreationFailure, "stat \"%s\" failed: %s", dirname.c_str(),
               strerror(errno));
    return -1;
  } else if (res != 0) {
    // Directory doesn't exist yet: create it, hand it to the system user, and
    // make sure /cache can hold the full worst-case stash size.
    LOG(INFO) << "creating stash " << dirname;
    res = mkdir(dirname.c_str(), STASH_DIRECTORY_MODE);

    if (res != 0) {
      ErrorAbort(state, kStashCreationFailure, "mkdir \"%s\" failed: %s", dirname.c_str(),
                 strerror(errno));
      return -1;
    }

    if (chown(dirname.c_str(), AID_SYSTEM, AID_SYSTEM) != 0) {  // system user
      ErrorAbort(state, kStashCreationFailure, "chown \"%s\" failed: %s", dirname.c_str(),
                 strerror(errno));
      return -1;
    }

    if (CacheSizeCheck(max_stash_size) != 0) {
      ErrorAbort(state, kStashCreationFailure, "not enough space for stash (%zu needed)",
                 max_stash_size);
      return -1;
    }

    return 1;  // Created directory
  }

  LOG(INFO) << "using existing stash " << dirname;

  // If the directory already exists, calculate the space already allocated to stash files and check
  // if there's enough for all required blocks. Delete any partially completed stash files first.
  EnumerateStash(dirname, [](const std::string& fn) {
    if (android::base::EndsWith(fn, ".partial")) {
      DeleteFile(fn);
    }
  });

  size_t existing = 0;
  EnumerateStash(dirname, [&existing](const std::string& fn) {
    if (fn.empty()) return;
    struct stat sb;
    if (stat(fn.c_str(), &sb) == -1) {
      PLOG(ERROR) << "stat \"" << fn << "\" failed";
      return;
    }
    existing += static_cast<size_t>(sb.st_size);
  });

  // Blocks already stashed count toward the budget; only the remainder needs
  // to fit in the free space on /cache.
  if (max_stash_size > existing) {
    size_t needed = max_stash_size - existing;
    if (CacheSizeCheck(needed) != 0) {
      ErrorAbort(state, kStashCreationFailure, "not enough space for stash (%zu more needed)",
                 needed);
      return -1;
    }
  }

  return 0;  // Using existing directory
}
|
|
|
|
|
2015-12-07 01:56:27 +01:00
|
|
|
static int FreeStash(const std::string& base, const std::string& id) {
|
2017-03-16 01:39:01 +01:00
|
|
|
if (base.empty() || id.empty()) {
|
|
|
|
return -1;
|
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2017-03-16 01:39:01 +01:00
|
|
|
DeleteFile(GetStashFileName(base, id, ""));
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2017-03-16 01:39:01 +01:00
|
|
|
return 0;
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
|
|
|
|
2017-03-31 01:57:29 +02:00
|
|
|
// Source contains packed data, which we want to move to the locations given in locs in the dest
|
|
|
|
// buffer. source and dest may be the same buffer.
|
2015-08-28 01:41:21 +02:00
|
|
|
static void MoveRange(std::vector<uint8_t>& dest, const RangeSet& locs,
|
2017-03-31 01:57:29 +02:00
|
|
|
const std::vector<uint8_t>& source) {
|
|
|
|
const uint8_t* from = source.data();
|
|
|
|
uint8_t* to = dest.data();
|
|
|
|
size_t start = locs.blocks();
|
|
|
|
// Must do the movement backward.
|
|
|
|
for (auto it = locs.crbegin(); it != locs.crend(); it++) {
|
|
|
|
size_t blocks = it->second - it->first;
|
|
|
|
start -= blocks;
|
|
|
|
memmove(to + (it->first * BLOCKSIZE), from + (start * BLOCKSIZE), blocks * BLOCKSIZE);
|
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
|
|
|
|
2017-03-23 22:43:44 +01:00
|
|
|
/**
 * We expect to parse the remainder of the parameter tokens as one of:
 *
 *    <src_block_count> <src_range>
 *        (loads data from source image only)
 *
 *    <src_block_count> - <[stash_id:stash_range] ...>
 *        (loads data from stashes only)
 *
 *    <src_block_count> <src_range> <src_loc> <[stash_id:stash_range] ...>
 *        (loads data from both source image and stashes)
 *
 * On return, params.buffer is filled with the loaded source data (rearranged and combined with
 * stashed data as necessary). buffer may be reallocated if needed to accommodate the source data.
 * tgt is the target RangeSet for detecting overlaps. Any stashes required are loaded using
 * LoadStash.
 *
 * Returns 0 on success, -1 on a parse or read failure. Note that a failure to
 * load an individual stash is NOT fatal here: the resulting buffer will fail
 * hash verification later, and the caller decides how to proceed.
 */
static int LoadSourceBlocks(CommandParameters& params, const RangeSet& tgt, size_t* src_blocks,
                            bool* overlap) {
  CHECK(src_blocks != nullptr);
  CHECK(overlap != nullptr);

  // <src_block_count>
  const std::string& token = params.tokens[params.cpos++];
  if (!android::base::ParseUint(token, src_blocks)) {
    LOG(ERROR) << "invalid src_block_count \"" << token << "\"";
    return -1;
  }

  allocate(*src_blocks * BLOCKSIZE, params.buffer);

  // "-" or <src_range> [<src_loc>]
  if (params.tokens[params.cpos] == "-") {
    // no source ranges, only stashes
    params.cpos++;
  } else {
    RangeSet src = RangeSet::Parse(params.tokens[params.cpos++]);
    CHECK(static_cast<bool>(src));
    *overlap = src.Overlaps(tgt);

    if (ReadBlocks(src, params.buffer, params.fd) == -1) {
      return -1;
    }

    if (params.cpos >= params.tokens.size()) {
      // no stashes, only source range
      return 0;
    }

    // <src_loc>: rearrange the packed source data into its in-buffer layout.
    RangeSet locs = RangeSet::Parse(params.tokens[params.cpos++]);
    CHECK(static_cast<bool>(locs));
    MoveRange(params.buffer, locs, params.buffer);
  }

  // <[stash_id:stash_range]>
  while (params.cpos < params.tokens.size()) {
    // Each word is an index into the stash table, a colon, and then a RangeSet describing where
    // in the source block that stashed data should go.
    std::vector<std::string> tokens = android::base::Split(params.tokens[params.cpos++], ":");
    if (tokens.size() != 2) {
      LOG(ERROR) << "invalid parameter";
      return -1;
    }

    std::vector<uint8_t> stash;
    if (LoadStash(params, tokens[0], false, nullptr, stash, true) == -1) {
      // These source blocks will fail verification if used later, but we
      // will let the caller decide if this is a fatal failure
      LOG(ERROR) << "failed to load stash " << tokens[0];
      continue;
    }

    RangeSet locs = RangeSet::Parse(tokens[1]);
    CHECK(static_cast<bool>(locs));
    MoveRange(params.buffer, locs, stash);
  }

  return 0;
}
|
|
|
|
|
2017-03-13 22:57:34 +01:00
|
|
|
/**
 * Do a source/target load for move/bsdiff/imgdiff in version 3.
 *
 * We expect to parse the remainder of the parameter tokens as one of:
 *
 *    <tgt_range> <src_block_count> <src_range>
 *        (loads data from source image only)
 *
 *    <tgt_range> <src_block_count> - <[stash_id:stash_range] ...>
 *        (loads data from stashes only)
 *
 *    <tgt_range> <src_block_count> <src_range> <src_loc> <[stash_id:stash_range] ...>
 *        (loads data from both source image and stashes)
 *
 * 'onehash' tells whether to expect separate source and target block hashes, or if they are both
 * the same and only one hash should be expected. params.isunresumable will be set to true if block
 * verification fails in a way that the update cannot be resumed anymore.
 *
 * If the function is unable to load the necessary blocks or their contents don't match the hashes,
 * the return value is -1 and the command should be aborted.
 *
 * If the return value is 1, the command has already been completed according to the contents of the
 * target blocks, and should not be performed again.
 *
 * If the return value is 0, source blocks have expected content and the command can be performed.
 */
static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t* src_blocks,
                              bool onehash, bool* overlap) {
  CHECK(src_blocks != nullptr);
  CHECK(overlap != nullptr);

  if (params.cpos >= params.tokens.size()) {
    LOG(ERROR) << "missing source hash";
    return -1;
  }

  std::string srchash = params.tokens[params.cpos++];
  std::string tgthash;

  if (onehash) {
    // move: source and target contents are identical, one hash serves both.
    tgthash = srchash;
  } else {
    if (params.cpos >= params.tokens.size()) {
      LOG(ERROR) << "missing target hash";
      return -1;
    }
    tgthash = params.tokens[params.cpos++];
  }

  // At least it needs to provide three parameters: <tgt_range>, <src_block_count> and
  // "-"/<src_range>.
  if (params.cpos + 2 >= params.tokens.size()) {
    LOG(ERROR) << "invalid parameters";
    return -1;
  }

  // <tgt_range>
  tgt = RangeSet::Parse(params.tokens[params.cpos++]);
  CHECK(static_cast<bool>(tgt));

  std::vector<uint8_t> tgtbuffer(tgt.blocks() * BLOCKSIZE);
  if (ReadBlocks(tgt, tgtbuffer, params.fd) == -1) {
    return -1;
  }

  // Return now if target blocks already have expected content.
  if (VerifyBlocks(tgthash, tgtbuffer, tgt.blocks(), false) == 0) {
    return 1;
  }

  // Load source blocks.
  if (LoadSourceBlocks(params, tgt, src_blocks, overlap) == -1) {
    return -1;
  }

  if (VerifyBlocks(srchash, params.buffer, *src_blocks, true) == 0) {
    // If source and target blocks overlap, stash the source blocks so we can
    // resume from possible write errors. In verify mode, we can skip stashing
    // because the source blocks won't be overwritten.
    if (*overlap && params.canwrite) {
      LOG(INFO) << "stashing " << *src_blocks << " overlapping blocks to " << srchash;

      bool stash_exists = false;
      if (WriteStash(params.stashbase, srchash, *src_blocks, params.buffer, true,
                     &stash_exists) != 0) {
        LOG(ERROR) << "failed to stash overlapping source blocks";
        return -1;
      }

      // Record progress so an interrupted update can resume past this point.
      if (!UpdateLastCommandIndex(params.cmdindex, params.cmdline)) {
        LOG(WARNING) << "Failed to update the last command file.";
      }

      params.stashed += *src_blocks;
      // Can be deleted when the write has completed.
      if (!stash_exists) {
        params.freestash = srchash;
      }
    }

    // Source blocks have expected content, command can proceed.
    return 0;
  }

  if (*overlap && LoadStash(params, srchash, true, nullptr, params.buffer, true) == 0) {
    // Overlapping source blocks were previously stashed, command can proceed. We are recovering
    // from an interrupted command, so we don't know if the stash can safely be deleted after this
    // command.
    return 0;
  }

  // Valid source data not available, update cannot be resumed.
  LOG(ERROR) << "partition has unexpected contents";
  PrintHashForCorruptedSourceBlocks(params, params.buffer);

  params.isunresumable = true;

  return -1;
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
// Executes a "move" command: copies source blocks (from the source range
// and/or stashes) to the target range. Skips the write if the target already
// has the expected contents (resumed update). Returns 0 on success, -1 on
// failure.
static int PerformCommandMove(CommandParameters& params) {
  size_t blocks = 0;
  bool overlap = false;
  RangeSet tgt;
  int status = LoadSrcTgtVersion3(params, tgt, &blocks, true, &overlap);

  if (status == -1) {
    LOG(ERROR) << "failed to read blocks for move";
    return -1;
  }

  if (status == 0) {
    params.foundwrites = true;
  } else {
    // status == 1: target already verified; flag out-of-order execution if a
    // write has already happened in this run.
    params.target_verified = true;
    if (params.foundwrites) {
      LOG(WARNING) << "warning: commands executed out of order [" << params.cmdname << "]";
    }
  }

  if (params.canwrite) {
    if (status == 0) {
      LOG(INFO) << " moving " << blocks << " blocks";

      if (WriteBlocks(tgt, params.buffer, params.fd) == -1) {
        return -1;
      }
    } else {
      LOG(INFO) << "skipping " << blocks << " already moved blocks";
    }
  }

  // The overlap stash (if any) is no longer needed once the write is done.
  if (!params.freestash.empty()) {
    FreeStash(params.stashbase, params.freestash);
    params.freestash.clear();
  }

  params.written += tgt.blocks();

  return 0;
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
// Executes a "stash" command: reads the given source range and saves it under
// the given id (the SHA-1 of the expected contents) for later commands.
// Returns 0 on success (including best-effort cases), -1 on failure.
static int PerformCommandStash(CommandParameters& params) {
  // <stash_id> <src_range>
  if (params.cpos + 1 >= params.tokens.size()) {
    LOG(ERROR) << "missing id and/or src range fields in stash command";
    return -1;
  }

  const std::string& id = params.tokens[params.cpos++];
  size_t blocks = 0;
  if (LoadStash(params, id, true, &blocks, params.buffer, false) == 0) {
    // Stash file already exists and has expected contents. Do not read from source again, as the
    // source may have been already overwritten during a previous attempt.
    return 0;
  }

  RangeSet src = RangeSet::Parse(params.tokens[params.cpos++]);
  CHECK(static_cast<bool>(src));

  allocate(src.blocks() * BLOCKSIZE, params.buffer);
  if (ReadBlocks(src, params.buffer, params.fd) == -1) {
    return -1;
  }
  blocks = src.blocks();
  // Remember where this stash came from so verify mode can re-read it from
  // the source blocks instead of /cache.
  stash_map[id] = src;

  if (VerifyBlocks(id, params.buffer, blocks, true) != 0) {
    // Source blocks have unexpected contents. If we actually need this data later, this is an
    // unrecoverable error. However, the command that uses the data may have already completed
    // previously, so the possible failure will occur during source block verification.
    LOG(ERROR) << "failed to load source blocks for stash " << id;
    return 0;
  }

  // In verify mode, we don't need to stash any blocks.
  if (!params.canwrite) {
    return 0;
  }

  LOG(INFO) << "stashing " << blocks << " blocks to " << id;
  int result = WriteStash(params.stashbase, id, blocks, params.buffer, false, nullptr);
  if (result == 0) {
    // Record progress so an interrupted update can resume past this point.
    if (!UpdateLastCommandIndex(params.cmdindex, params.cmdline)) {
      LOG(WARNING) << "Failed to update the last command file.";
    }

    params.stashed += blocks;
  }
  return result;
}
|
|
|
|
|
|
|
|
// Executes a "free" command: drops the in-memory record of a stash and, when
// this run owns the stash directory or is writing, deletes its on-disk file.
static int PerformCommandFree(CommandParameters& params) {
  // Expected tokens: <stash_id>
  if (params.cpos >= params.tokens.size()) {
    LOG(ERROR) << "missing stash id in free command";
    return -1;
  }

  const std::string& stash_id = params.tokens[params.cpos++];
  stash_map.erase(stash_id);

  // A pure verification pass over a pre-existing stash leaves files alone.
  const bool remove_on_disk = params.createdstash || params.canwrite;
  return remove_on_disk ? FreeStash(params.stashbase, stash_id) : 0;
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
// Executes a "zero" (or erase-debug) command: discards and then writes zero
// blocks over the given target ranges. Returns 0 on success, -1 on failure.
static int PerformCommandZero(CommandParameters& params) {
  if (params.cpos >= params.tokens.size()) {
    LOG(ERROR) << "missing target blocks for zero";
    return -1;
  }

  RangeSet tgt = RangeSet::Parse(params.tokens[params.cpos++]);
  CHECK(static_cast<bool>(tgt));

  LOG(INFO) << " zeroing " << tgt.blocks() << " blocks";

  // A single zero-filled block, written repeatedly over each target block.
  allocate(BLOCKSIZE, params.buffer);
  memset(params.buffer.data(), 0, BLOCKSIZE);

  if (params.canwrite) {
    for (const auto& range : tgt) {
      off64_t offset = static_cast<off64_t>(range.first) * BLOCKSIZE;
      size_t size = (range.second - range.first) * BLOCKSIZE;
      if (!discard_blocks(params.fd, offset, size)) {
        return -1;
      }

      if (!check_lseek(params.fd, offset, SEEK_SET)) {
        return -1;
      }

      for (size_t j = range.first; j < range.second; ++j) {
        if (write_all(params.fd, params.buffer, BLOCKSIZE) == -1) {
          return -1;
        }
      }
    }
  }

  if (params.cmdname[0] == 'z') {
    // Update only for the zero command, as the erase command will call
    // this if DEBUG_ERASE is defined.
    params.written += tgt.blocks();
  }

  return 0;
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
// Handles the "new <tgt_rangeset>" transfer-list command: fills the target
// blocks with data produced by the new-data receiver thread (unzip_new_data).
// Returns 0 on success, -1 on failure.
static int PerformCommandNew(CommandParameters& params) {
  if (params.cpos >= params.tokens.size()) {
    LOG(ERROR) << "missing target blocks for new";
    return -1;
  }

  RangeSet tgt = RangeSet::Parse(params.tokens[params.cpos++]);
  CHECK(static_cast<bool>(tgt));

  if (params.canwrite) {
    LOG(INFO) << " writing " << tgt.blocks() << " blocks of new data";

    // Publish a writer for the receiver thread under the shared mutex, then
    // wake it up. The receiver clears nti.writer when the ranges are full.
    pthread_mutex_lock(&params.nti.mu);
    params.nti.writer = std::make_unique<RangeSinkWriter>(params.fd, tgt);
    pthread_cond_broadcast(&params.nti.cv);

    // Wait until the receiver finishes filling the target ranges.
    while (params.nti.writer != nullptr) {
      if (!params.nti.receiver_available) {
        // Receiver exited early: the new-data stream ran out before the
        // target ranges were filled.
        LOG(ERROR) << "missing " << (tgt.blocks() * BLOCKSIZE - params.nti.writer->BytesWritten())
                   << " bytes of new data";
        pthread_mutex_unlock(&params.nti.mu);
        return -1;
      }
      pthread_cond_wait(&params.nti.cv, &params.nti.mu);
    }

    pthread_mutex_unlock(&params.nti.mu);
  }

  // Progress accounting; counted even on a dry run so set_progress stays sane.
  params.written += tgt.blocks();

  return 0;
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
// Handles the "bsdiff"/"imgdiff" transfer-list commands: loads the source
// blocks (via LoadSrcTgtVersion3), applies the patch at [offset, offset+len)
// within the package's concatenated patch stream, and writes the result to
// the target ranges. Returns 0 on success, -1 on failure.
static int PerformCommandDiff(CommandParameters& params) {
  // <offset> <length>
  if (params.cpos + 1 >= params.tokens.size()) {
    LOG(ERROR) << "missing patch offset or length for " << params.cmdname;
    return -1;
  }

  size_t offset;
  if (!android::base::ParseUint(params.tokens[params.cpos++], &offset)) {
    LOG(ERROR) << "invalid patch offset";
    return -1;
  }

  size_t len;
  if (!android::base::ParseUint(params.tokens[params.cpos++], &len)) {
    LOG(ERROR) << "invalid patch len";
    return -1;
  }

  RangeSet tgt;
  size_t blocks = 0;
  bool overlap = false;
  // status: -1 = error; 0 = source loaded into params.buffer, command must be
  // executed; >0 = target already has the expected contents, command may be
  // skipped (based on the other status uses below — confirm against
  // LoadSrcTgtVersion3).
  int status = LoadSrcTgtVersion3(params, tgt, &blocks, false, &overlap);

  if (status == -1) {
    LOG(ERROR) << "failed to read blocks for diff";
    return -1;
  }

  if (status == 0) {
    params.foundwrites = true;
  } else {
    // Target is already patched; remember that for the resume bookkeeping.
    params.target_verified = true;
    if (params.foundwrites) {
      LOG(WARNING) << "warning: commands executed out of order [" << params.cmdname << "]";
    }
  }

  if (params.canwrite) {
    if (status == 0) {
      LOG(INFO) << "patching " << blocks << " blocks to " << tgt.blocks();
      // The patch blob is memory-mapped from the package; no copy of the
      // underlying zip data is made beyond this Value's string.
      Value patch_value(
          VAL_BLOB, std::string(reinterpret_cast<const char*>(params.patch_start + offset), len));

      RangeSinkWriter writer(params.fd, tgt);
      if (params.cmdname[0] == 'i') {  // imgdiff
        if (ApplyImagePatch(params.buffer.data(), blocks * BLOCKSIZE, patch_value,
                            std::bind(&RangeSinkWriter::Write, &writer, std::placeholders::_1,
                                      std::placeholders::_2),
                            nullptr, nullptr) != 0) {
          LOG(ERROR) << "Failed to apply image patch.";
          failure_type = kPatchApplicationFailure;
          return -1;
        }
      } else {
        if (ApplyBSDiffPatch(params.buffer.data(), blocks * BLOCKSIZE, patch_value, 0,
                             std::bind(&RangeSinkWriter::Write, &writer, std::placeholders::_1,
                                       std::placeholders::_2),
                             nullptr) != 0) {
          LOG(ERROR) << "Failed to apply bsdiff patch.";
          failure_type = kPatchApplicationFailure;
          return -1;
        }
      }

      // We expect the output of the patcher to fill the tgt ranges exactly.
      if (!writer.Finished()) {
        LOG(ERROR) << "range sink underrun?";
      }
    } else {
      LOG(INFO) << "skipping " << blocks << " blocks already patched to " << tgt.blocks() << " ["
                << params.cmdline << "]";
    }
  }

  // Release any stash slot the command marked as freeable, whether or not the
  // patch was actually applied.
  if (!params.freestash.empty()) {
    FreeStash(params.stashbase, params.freestash);
    params.freestash.clear();
  }

  params.written += tgt.blocks();

  return 0;
}
|
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
// Handles the "erase <tgt_rangeset>" transfer-list command: issues BLKDISCARD
// for each target range on the underlying block device. With DEBUG_ERASE set,
// the ranges are explicitly zeroed instead so the result is observable.
// Returns 0 on success, -1 on failure.
static int PerformCommandErase(CommandParameters& params) {
  if (DEBUG_ERASE) {
    return PerformCommandZero(params);
  }

  // Discard only makes sense on a real block device.
  struct stat sb;
  if (fstat(params.fd, &sb) == -1) {
    PLOG(ERROR) << "failed to fstat device to erase";
    return -1;
  }
  if (!S_ISBLK(sb.st_mode)) {
    LOG(ERROR) << "not a block device; skipping erase";
    return -1;
  }

  if (params.cpos >= params.tokens.size()) {
    LOG(ERROR) << "missing target blocks for erase";
    return -1;
  }

  RangeSet tgt = RangeSet::Parse(params.tokens[params.cpos++]);
  CHECK(static_cast<bool>(tgt));

  if (!params.canwrite) {
    return 0;
  }

  LOG(INFO) << " erasing " << tgt.blocks() << " blocks";

  for (const auto& blk : tgt) {
    // BLKDISCARD takes a { byte offset, byte length } pair.
    uint64_t args[2] = { blk.first * static_cast<uint64_t>(BLOCKSIZE),
                         (blk.second - blk.first) * static_cast<uint64_t>(BLOCKSIZE) };

    if (ioctl(params.fd, BLKDISCARD, &args) == -1) {
      PLOG(ERROR) << "BLKDISCARD ioctl failed";
      return -1;
    }
  }

  return 0;
}
|
|
|
|
|
|
|
|
// Definitions for transfer list command functions
|
2015-08-28 01:41:21 +02:00
|
|
|
typedef int (*CommandFunction)(CommandParameters&);
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2015-08-28 01:41:21 +02:00
|
|
|
struct Command {
|
2014-12-09 17:39:47 +01:00
|
|
|
const char* name;
|
|
|
|
CommandFunction f;
|
2015-08-28 01:41:21 +02:00
|
|
|
};
|
2014-12-09 17:39:47 +01:00
|
|
|
|
|
|
|
// args:
//    - block device (or file) to modify in-place
//    - transfer list (blob)
//    - new data stream (filename within package.zip)
//    - patch stream (filename within package.zip, must be uncompressed)
//
// Drives a full block-image update (or, with dryrun == true, a verification
// pass): parses the transfer list header, spawns the new-data receiver thread
// when writing, then executes each transfer command via the given command
// table. Returns StringValue("t") on success, StringValue("") on failure, or
// nullptr if argument evaluation itself failed.
static Value* PerformBlockImageUpdate(const char* name, State* state,
                                      const std::vector<std::unique_ptr<Expr>>& argv,
                                      const Command* commands, size_t cmdcount, bool dryrun) {
  CommandParameters params = {};
  params.canwrite = !dryrun;

  LOG(INFO) << "performing " << (dryrun ? "verification" : "update");
  if (state->is_retry) {
    is_retry = true;
    LOG(INFO) << "This update is a retry.";
  }
  if (argv.size() != 4) {
    ErrorAbort(state, kArgsParsingFailure, "block_image_update expects 4 arguments, got %zu",
               argv.size());
    return StringValue("");
  }

  std::vector<std::unique_ptr<Value>> args;
  if (!ReadValueArgs(state, argv, &args)) {
    return nullptr;
  }

  const std::unique_ptr<Value>& blockdev_filename = args[0];
  const std::unique_ptr<Value>& transfer_list_value = args[1];
  const std::unique_ptr<Value>& new_data_fn = args[2];
  const std::unique_ptr<Value>& patch_data_fn = args[3];

  // Type-check each argument individually so the error message names the
  // offending parameter.
  if (blockdev_filename->type != VAL_STRING) {
    ErrorAbort(state, kArgsParsingFailure, "blockdev_filename argument to %s must be string", name);
    return StringValue("");
  }
  if (transfer_list_value->type != VAL_BLOB) {
    ErrorAbort(state, kArgsParsingFailure, "transfer_list argument to %s must be blob", name);
    return StringValue("");
  }
  if (new_data_fn->type != VAL_STRING) {
    ErrorAbort(state, kArgsParsingFailure, "new_data_fn argument to %s must be string", name);
    return StringValue("");
  }
  if (patch_data_fn->type != VAL_STRING) {
    ErrorAbort(state, kArgsParsingFailure, "patch_data_fn argument to %s must be string", name);
    return StringValue("");
  }

  UpdaterInfo* ui = static_cast<UpdaterInfo*>(state->cookie);
  if (ui == nullptr) {
    return StringValue("");
  }

  FILE* cmd_pipe = ui->cmd_pipe;
  ZipArchiveHandle za = ui->package_zip;

  if (cmd_pipe == nullptr || za == nullptr) {
    return StringValue("");
  }

  // Locate the (uncompressed) patch stream inside the package so it can be
  // addressed by raw offset.
  ZipString path_data(patch_data_fn->data.c_str());
  ZipEntry patch_entry;
  if (FindEntry(za, path_data, &patch_entry) != 0) {
    LOG(ERROR) << name << "(): no file \"" << patch_data_fn->data << "\" in package";
    return StringValue("");
  }

  params.patch_start = ui->package_zip_addr + patch_entry.offset;
  ZipString new_data(new_data_fn->data.c_str());
  ZipEntry new_entry;
  if (FindEntry(za, new_data, &new_entry) != 0) {
    LOG(ERROR) << name << "(): no file \"" << new_data_fn->data << "\" in package";
    return StringValue("");
  }

  params.fd.reset(TEMP_FAILURE_RETRY(ota_open(blockdev_filename->data.c_str(), O_RDWR)));
  if (params.fd == -1) {
    PLOG(ERROR) << "open \"" << blockdev_filename->data << "\" failed";
    return StringValue("");
  }

  if (params.canwrite) {
    // Set up and start the new-data receiver thread; it feeds "new" commands
    // through params.nti (see PerformCommandNew).
    params.nti.za = za;
    params.nti.entry = new_entry;
    params.nti.brotli_compressed = android::base::EndsWith(new_data_fn->data, ".br");
    if (params.nti.brotli_compressed) {
      // Initialize brotli decoder state.
      params.nti.brotli_decoder_state = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr);
    }
    params.nti.receiver_available = true;

    pthread_mutex_init(&params.nti.mu, nullptr);
    pthread_cond_init(&params.nti.cv, nullptr);
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);

    int error = pthread_create(&params.thread, &attr, unzip_new_data, &params.nti);
    if (error != 0) {
      PLOG(ERROR) << "pthread_create failed";
      return StringValue("");
    }
  }

  std::vector<std::string> lines = android::base::Split(transfer_list_value->data, "\n");
  if (lines.size() < 2) {
    ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zd]",
               lines.size());
    return StringValue("");
  }

  // First line in transfer list is the version number.
  if (!android::base::ParseInt(lines[0], &params.version, 3, 4)) {
    LOG(ERROR) << "unexpected transfer list version [" << lines[0] << "]";
    return StringValue("");
  }

  LOG(INFO) << "blockimg version is " << params.version;

  // Second line in transfer list is the total number of blocks we expect to write.
  size_t total_blocks;
  if (!android::base::ParseUint(lines[1], &total_blocks)) {
    ErrorAbort(state, kArgsParsingFailure, "unexpected block count [%s]", lines[1].c_str());
    return StringValue("");
  }

  if (total_blocks == 0) {
    return StringValue("t");
  }

  size_t start = 2;
  if (lines.size() < 4) {
    ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zu]",
               lines.size());
    return StringValue("");
  }

  // Third line is how many stash entries are needed simultaneously.
  LOG(INFO) << "maximum stash entries " << lines[2];

  // Fourth line is the maximum number of blocks that will be stashed simultaneously
  size_t stash_max_blocks;
  if (!android::base::ParseUint(lines[3], &stash_max_blocks)) {
    ErrorAbort(state, kArgsParsingFailure, "unexpected maximum stash blocks [%s]",
               lines[3].c_str());
    return StringValue("");
  }

  int res = CreateStash(state, stash_max_blocks, blockdev_filename->data, params.stashbase);
  if (res == -1) {
    return StringValue("");
  }

  params.createdstash = res;

  // When performing an update, save the index and cmdline of the current command into
  // the last_command_file if this command writes to the stash either explicitly of implicitly.
  // Upon resuming an update, read the saved index first; then
  //   1. In verification mode, check if the 'move' or 'diff' commands before the saved index has
  //      the expected target blocks already. If not, these commands cannot be skipped and we need
  //      to attempt to execute them again. Therefore, we will delete the last_command_file so that
  //      the update will resume from the start of the transfer list.
  //   2. In update mode, skip all commands before the saved index. Therefore, we can avoid deleting
  //      stashes with duplicate id unintentionally (b/69858743); and also speed up the update.
  // If an update succeeds or is unresumable, delete the last_command_file.
  int saved_last_command_index;
  if (!ParseLastCommandFile(&saved_last_command_index)) {
    DeleteLastCommandFile();
    // We failed to parse the last command, set it explicitly to -1.
    saved_last_command_index = -1;
  }

  // Skip the two header lines consumed above (stash entry count / max blocks).
  start += 2;

  // Build a map of the available commands
  std::unordered_map<std::string, const Command*> cmd_map;
  for (size_t i = 0; i < cmdcount; ++i) {
    if (cmd_map.find(commands[i].name) != cmd_map.end()) {
      LOG(ERROR) << "Error: command [" << commands[i].name << "] already exists in the cmd map.";
      // NOTE(review): strdup here is inconsistent with the StringValue("")
      // used on every other error path — confirm whether it leaks.
      return StringValue(strdup(""));
    }
    cmd_map[commands[i].name] = &commands[i];
  }

  int rc = -1;

  // Subsequent lines are all individual transfer commands
  for (size_t i = start; i < lines.size(); i++) {
    const std::string& line = lines[i];
    if (line.empty()) continue;

    params.tokens = android::base::Split(line, " ");
    params.cpos = 0;
    // cmdindex is an int; commands beyond INT_MAX cannot be tracked for
    // resume and are marked -1.
    if (i - start > std::numeric_limits<int>::max()) {
      params.cmdindex = -1;
    } else {
      params.cmdindex = i - start;
    }
    params.cmdname = params.tokens[params.cpos++].c_str();
    params.cmdline = line.c_str();
    params.target_verified = false;

    if (cmd_map.find(params.cmdname) == cmd_map.end()) {
      LOG(ERROR) << "unexpected command [" << params.cmdname << "]";
      goto pbiudone;
    }

    const Command* cmd = cmd_map[params.cmdname];

    // Skip the command if we explicitly set the corresponding function pointer to nullptr, e.g.
    // "erase" during block_image_verify.
    if (cmd->f == nullptr) {
      LOG(DEBUG) << "skip executing command [" << line << "]";
      continue;
    }

    // Skip all commands before the saved last command index when resuming an update.
    if (params.canwrite && params.cmdindex != -1 && params.cmdindex <= saved_last_command_index) {
      LOG(INFO) << "Skipping already executed command: " << params.cmdindex
                << ", last executed command for previous update: " << saved_last_command_index;
      continue;
    }

    if (cmd->f(params) == -1) {
      LOG(ERROR) << "failed to execute command [" << line << "]";
      goto pbiudone;
    }

    // In verify mode, check if the commands before the saved last_command_index have been
    // executed correctly. If some target blocks have unexpected contents, delete the last command
    // file so that we will resume the update from the first command in the transfer list.
    if (!params.canwrite && saved_last_command_index != -1 && params.cmdindex != -1 &&
        params.cmdindex <= saved_last_command_index) {
      // TODO(xunchang) check that the cmdline of the saved index is correct.
      std::string cmdname = std::string(params.cmdname);
      if ((cmdname == "move" || cmdname == "bsdiff" || cmdname == "imgdiff") &&
          !params.target_verified) {
        LOG(WARNING) << "Previously executed command " << saved_last_command_index << ": "
                     << params.cmdline << " doesn't produce expected target blocks.";
        saved_last_command_index = -1;
        DeleteLastCommandFile();
      }
    }
    if (params.canwrite) {
      // Persist the command's writes before reporting progress, so the saved
      // state never runs ahead of the device contents.
      if (ota_fsync(params.fd) == -1) {
        failure_type = kFsyncFailure;
        PLOG(ERROR) << "fsync failed";
        goto pbiudone;
      }
      fprintf(cmd_pipe, "set_progress %.4f\n", static_cast<double>(params.written) / total_blocks);
      fflush(cmd_pipe);
    }
  }

  rc = 0;

pbiudone:
  // Common cleanup for both success and failure paths.
  if (params.canwrite) {
    // Shut down the new-data receiver thread and join it.
    pthread_mutex_lock(&params.nti.mu);
    if (params.nti.receiver_available) {
      LOG(WARNING) << "new data receiver is still available after executing all commands.";
    }
    params.nti.receiver_available = false;
    pthread_cond_broadcast(&params.nti.cv);
    pthread_mutex_unlock(&params.nti.mu);
    int ret = pthread_join(params.thread, nullptr);
    if (ret != 0) {
      LOG(WARNING) << "pthread join returned with " << strerror(ret);
    }

    if (rc == 0) {
      LOG(INFO) << "wrote " << params.written << " blocks; expected " << total_blocks;
      LOG(INFO) << "stashed " << params.stashed << " blocks";
      LOG(INFO) << "max alloc needed was " << params.buffer.size();

      const char* partition = strrchr(blockdev_filename->data.c_str(), '/');
      if (partition != nullptr && *(partition + 1) != 0) {
        fprintf(cmd_pipe, "log bytes_written_%s: %zu\n", partition + 1, params.written * BLOCKSIZE);
        fprintf(cmd_pipe, "log bytes_stashed_%s: %zu\n", partition + 1, params.stashed * BLOCKSIZE);
        fflush(cmd_pipe);
      }
      // Delete stash only after successfully completing the update, as it may contain blocks needed
      // to complete the update later.
      DeleteStash(params.stashbase);
      DeleteLastCommandFile();
    }

    pthread_mutex_destroy(&params.nti.mu);
    pthread_cond_destroy(&params.nti.cv);
  } else if (rc == 0) {
    LOG(INFO) << "verified partition contents; update may be resumed";
  }

  if (ota_fsync(params.fd) == -1) {
    failure_type = kFsyncFailure;
    PLOG(ERROR) << "fsync failed";
  }
  // params.fd will be automatically closed because it's a unique_fd.

  if (params.nti.brotli_decoder_state != nullptr) {
    BrotliDecoderDestroyInstance(params.nti.brotli_decoder_state);
  }

  // Delete the last command file if the update cannot be resumed.
  if (params.isunresumable) {
    DeleteLastCommandFile();
  }

  // Only delete the stash if the update cannot be resumed, or it's a verification run and we
  // created the stash.
  if (params.isunresumable || (!params.canwrite && params.createdstash)) {
    DeleteStash(params.stashbase);
  }

  // Propagate the first recorded failure cause to the script state.
  if (failure_type != kNoCause && state->cause_code == kNoCause) {
    state->cause_code = failure_type;
  }

  return StringValue(rc == 0 ? "t" : "");
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* The transfer list is a text file containing commands to transfer data from one place to another
|
|
|
|
* on the target partition. We parse it and execute the commands in order:
|
|
|
|
*
|
|
|
|
* zero [rangeset]
|
|
|
|
* - Fill the indicated blocks with zeros.
|
|
|
|
*
|
|
|
|
* new [rangeset]
|
|
|
|
* - Fill the blocks with data read from the new_data file.
|
|
|
|
*
|
|
|
|
* erase [rangeset]
|
|
|
|
* - Mark the given blocks as empty.
|
|
|
|
*
|
|
|
|
* move <...>
|
|
|
|
* bsdiff <patchstart> <patchlen> <...>
|
|
|
|
* imgdiff <patchstart> <patchlen> <...>
|
|
|
|
* - Read the source blocks, apply a patch (or not in the case of move), write result to target
|
|
|
|
* blocks. bsdiff or imgdiff specifies the type of patch; move means no patch at all.
|
|
|
|
*
|
|
|
|
* See the comments in LoadSrcTgtVersion3() for a description of the <...> format.
|
|
|
|
*
|
|
|
|
* stash <stash_id> <src_range>
|
|
|
|
* - Load the given source range and stash the data in the given slot of the stash table.
|
|
|
|
*
|
|
|
|
* free <stash_id>
|
|
|
|
* - Free the given stash data.
|
|
|
|
*
|
|
|
|
* The creator of the transfer list will guarantee that no block is read (ie, used as the source for
|
|
|
|
* a patch or move) after it has been written.
|
|
|
|
*
|
|
|
|
* The creator will guarantee that a given stash is loaded (with a stash command) before it's used
|
|
|
|
* in a move/bsdiff/imgdiff command.
|
|
|
|
*
|
|
|
|
* Within one command the source and target ranges may overlap so in general we need to read the
|
|
|
|
* entire source into memory before writing anything to the target blocks.
|
|
|
|
*
|
|
|
|
* All the patch data is concatenated into one patch_data file in the update package. It must be
|
|
|
|
* stored uncompressed because we memory-map it in directly from the archive. (Since patches are
|
|
|
|
* already compressed, we lose very little by not compressing their concatenation.)
|
|
|
|
*
|
|
|
|
* Commands that read data from the partition (i.e. move/bsdiff/imgdiff/stash) have one or more
|
|
|
|
* additional hashes before the range parameters, which are used to check if the command has already
|
|
|
|
* been completed and verify the integrity of the source data.
|
|
|
|
*/
|
2017-03-06 23:44:59 +01:00
|
|
|
// block_image_verify(): dry-run entry point. Runs the transfer list without
// writing to the device, to decide whether an (interrupted) update can
// proceed or be resumed.
Value* BlockImageVerifyFn(const char* name, State* state,
                          const std::vector<std::unique_ptr<Expr>>& argv) {
  // Commands which are not tested are set to nullptr to skip them completely
  const Command verify_commands[] = {
    { "bsdiff", PerformCommandDiff },
    { "erase", nullptr },
    { "free", PerformCommandFree },
    { "imgdiff", PerformCommandDiff },
    { "move", PerformCommandMove },
    { "new", nullptr },
    { "stash", PerformCommandStash },
    { "zero", nullptr }
  };
  constexpr size_t kCommandCount = sizeof(verify_commands) / sizeof(verify_commands[0]);

  // Perform a dry run without writing to test if an update can proceed
  return PerformBlockImageUpdate(name, state, argv, verify_commands, kCommandCount, true);
}
|
|
|
|
|
2017-03-06 23:44:59 +01:00
|
|
|
// block_image_update(): real-update entry point. Executes every transfer-list
// command, writing to the target block device.
Value* BlockImageUpdateFn(const char* name, State* state,
                          const std::vector<std::unique_ptr<Expr>>& argv) {
  // Every command is live here, unlike the verify table.
  const Command update_commands[] = {
    { "bsdiff", PerformCommandDiff },
    { "erase", PerformCommandErase },
    { "free", PerformCommandFree },
    { "imgdiff", PerformCommandDiff },
    { "move", PerformCommandMove },
    { "new", PerformCommandNew },
    { "stash", PerformCommandStash },
    { "zero", PerformCommandZero }
  };
  constexpr size_t kCommandCount = sizeof(update_commands) / sizeof(update_commands[0]);

  return PerformBlockImageUpdate(name, state, argv, update_commands, kCommandCount, false);
}
|
|
|
|
|
2017-03-06 23:44:59 +01:00
|
|
|
// range_sha1(blockdev, ranges): computes the SHA-1 over the given block
// ranges of a block device (or file) and returns it as a hex string.
// Aborts the script with an appropriate error code on any I/O failure.
Value* RangeSha1Fn(const char* name, State* state, const std::vector<std::unique_ptr<Expr>>& argv) {
  if (argv.size() != 2) {
    ErrorAbort(state, kArgsParsingFailure, "range_sha1 expects 2 arguments, got %zu", argv.size());
    return StringValue("");
  }

  std::vector<std::unique_ptr<Value>> args;
  if (!ReadValueArgs(state, argv, &args)) {
    return nullptr;
  }

  const std::unique_ptr<Value>& blockdev_filename = args[0];
  const std::unique_ptr<Value>& ranges = args[1];

  if (blockdev_filename->type != VAL_STRING) {
    ErrorAbort(state, kArgsParsingFailure, "blockdev_filename argument to %s must be string", name);
    return StringValue("");
  }
  if (ranges->type != VAL_STRING) {
    ErrorAbort(state, kArgsParsingFailure, "ranges argument to %s must be string", name);
    return StringValue("");
  }

  android::base::unique_fd fd(ota_open(blockdev_filename->data.c_str(), O_RDWR));
  if (fd == -1) {
    ErrorAbort(state, kFileOpenFailure, "open \"%s\" failed: %s", blockdev_filename->data.c_str(),
               strerror(errno));
    return StringValue("");
  }

  RangeSet rs = RangeSet::Parse(ranges->data);
  CHECK(static_cast<bool>(rs));

  SHA_CTX ctx;
  SHA1_Init(&ctx);

  // Hash the ranges block by block, in the order they appear in the range set.
  std::vector<uint8_t> buffer(BLOCKSIZE);
  for (const auto& range : rs) {
    if (!check_lseek(fd, static_cast<off64_t>(range.first) * BLOCKSIZE, SEEK_SET)) {
      ErrorAbort(state, kLseekFailure, "failed to seek %s: %s", blockdev_filename->data.c_str(),
                 strerror(errno));
      return StringValue("");
    }

    for (size_t j = range.first; j < range.second; ++j) {
      if (read_all(fd, buffer, BLOCKSIZE) == -1) {
        ErrorAbort(state, kFreadFailure, "failed to read %s: %s", blockdev_filename->data.c_str(),
                   strerror(errno));
        return StringValue("");
      }

      SHA1_Update(&ctx, buffer.data(), BLOCKSIZE);
    }
  }
  uint8_t digest[SHA_DIGEST_LENGTH];
  SHA1_Final(digest, &ctx);

  return StringValue(print_sha1(digest));
}
|
|
|
|
|
2015-12-15 20:47:30 +01:00
|
|
|
// This function checks if a device has been remounted R/W prior to an incremental
|
|
|
|
// OTA update. This is an common cause of update abortion. The function reads the
|
|
|
|
// 1st block of each partition and check for mounting time/count. It return string "t"
|
|
|
|
// if executes successfully and an empty string otherwise.
|
|
|
|
|
2017-03-06 23:44:59 +01:00
|
|
|
Value* CheckFirstBlockFn(const char* name, State* state,
|
|
|
|
const std::vector<std::unique_ptr<Expr>>& argv) {
|
2017-03-31 10:18:13 +02:00
|
|
|
if (argv.size() != 1) {
|
|
|
|
ErrorAbort(state, kArgsParsingFailure, "check_first_block expects 1 argument, got %zu",
|
|
|
|
argv.size());
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2017-03-06 23:44:59 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
std::vector<std::unique_ptr<Value>> args;
|
|
|
|
if (!ReadValueArgs(state, argv, &args)) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
const std::unique_ptr<Value>& arg_filename = args[0];
|
2016-10-18 03:15:20 +02:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
if (arg_filename->type != VAL_STRING) {
|
|
|
|
ErrorAbort(state, kArgsParsingFailure, "filename argument to %s must be string", name);
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
android::base::unique_fd fd(ota_open(arg_filename->data.c_str(), O_RDONLY));
|
|
|
|
if (fd == -1) {
|
|
|
|
ErrorAbort(state, kFileOpenFailure, "open \"%s\" failed: %s", arg_filename->data.c_str(),
|
|
|
|
strerror(errno));
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 01:57:29 +02:00
|
|
|
RangeSet blk0(std::vector<Range>{ Range{ 0, 1 } });
|
2017-03-31 10:18:13 +02:00
|
|
|
std::vector<uint8_t> block0_buffer(BLOCKSIZE);
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
if (ReadBlocks(blk0, block0_buffer, fd) == -1) {
|
|
|
|
ErrorAbort(state, kFreadFailure, "failed to read %s: %s", arg_filename->data.c_str(),
|
|
|
|
strerror(errno));
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
// https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout
|
|
|
|
// Super block starts from block 0, offset 0x400
|
|
|
|
// 0x2C: len32 Mount time
|
|
|
|
// 0x30: len32 Write time
|
|
|
|
// 0x34: len16 Number of mounts since the last fsck
|
|
|
|
// 0x38: len16 Magic signature 0xEF53
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
time_t mount_time = *reinterpret_cast<uint32_t*>(&block0_buffer[0x400 + 0x2C]);
|
|
|
|
uint16_t mount_count = *reinterpret_cast<uint16_t*>(&block0_buffer[0x400 + 0x34]);
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
if (mount_count > 0) {
|
|
|
|
uiPrintf(state, "Device was remounted R/W %" PRIu16 " times", mount_count);
|
|
|
|
uiPrintf(state, "Last remount happened on %s", ctime(&mount_time));
|
|
|
|
}
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
return StringValue("t");
|
2015-12-15 20:47:30 +01:00
|
|
|
}
|
|
|
|
|
2017-03-06 23:44:59 +01:00
|
|
|
// Attempts to repair corrupted blocks on a verity-protected partition using the
// FEC (forward error correction) metadata embedded in the image. Takes two string
// arguments: the block device path and the range set of blocks to recover.
// Returns "t" on success and an empty string (after aborting) on failure.
Value* BlockImageRecoverFn(const char* name, State* state,
                           const std::vector<std::unique_ptr<Expr>>& argv) {
  if (argv.size() != 2) {
    ErrorAbort(state, kArgsParsingFailure, "block_image_recover expects 2 arguments, got %zu",
               argv.size());
    return StringValue("");
  }

  std::vector<std::unique_ptr<Value>> args;
  if (!ReadValueArgs(state, argv, &args)) {
    return nullptr;
  }

  const std::unique_ptr<Value>& filename = args[0];
  const std::unique_ptr<Value>& ranges = args[1];

  // Both arguments must have evaluated to plain strings.
  if (filename->type != VAL_STRING) {
    ErrorAbort(state, kArgsParsingFailure, "filename argument to %s must be string", name);
    return StringValue("");
  }
  if (ranges->type != VAL_STRING) {
    ErrorAbort(state, kArgsParsingFailure, "ranges argument to %s must be string", name);
    return StringValue("");
  }

  RangeSet block_set = RangeSet::Parse(ranges->data);
  if (!block_set) {
    ErrorAbort(state, kArgsParsingFailure, "failed to parse ranges: %s", ranges->data.c_str());
    return StringValue("");
  }

  // Leave a note in the log whenever recovery is attempted.
  LOG(INFO) << filename->data << " image corrupted, attempting to recover...";

  // Opening with O_RDWR lets libfec write corrected data back to the device
  // transparently whenever a corrupted block is read.
  fec::io fec_handle(filename->data, O_RDWR);
  if (!fec_handle) {
    ErrorAbort(state, kLibfecFailure, "fec_open \"%s\" failed: %s", filename->data.c_str(),
               strerror(errno));
    return StringValue("");
  }

  // Recovery requires both the ECC data and the verity metadata to be present.
  if (!fec_handle.has_ecc() || !fec_handle.has_verity()) {
    ErrorAbort(state, kLibfecFailure, "unable to use metadata to correct errors");
    return StringValue("");
  }

  fec_status status;
  if (!fec_handle.get_status(status)) {
    ErrorAbort(state, kLibfecFailure, "failed to read FEC status");
    return StringValue("");
  }

  uint8_t block_data[BLOCKSIZE];
  for (const auto& blk_range : block_set) {
    for (size_t blk = blk_range.first; blk < blk_range.second; ++blk) {
      // Stay within the data area; libfec validates and corrects metadata itself.
      if (static_cast<uint64_t>(blk) * BLOCKSIZE >= status.data_size) {
        continue;
      }

      // Reading a block through libfec triggers in-place correction of that block.
      if (fec_handle.pread(block_data, BLOCKSIZE, static_cast<off64_t>(blk) * BLOCKSIZE) !=
          BLOCKSIZE) {
        ErrorAbort(state, kLibfecFailure, "failed to recover %s (block %zu): %s",
                   filename->data.c_str(), blk, strerror(errno));
        return StringValue("");
      }

      // If we want to be able to recover from a situation where rewriting a corrected
      // block doesn't guarantee the same data will be returned when re-read later, we
      // can save a copy of corrected blocks to /cache. Note:
      //
      // 1. Maximum space required from /cache is the same as the maximum number of
      //    corrupted blocks we can correct. For RS(255, 253) and a 2 GiB partition,
      //    this would be ~16 MiB, for example.
      //
      // 2. To find out if this block was corrupted, call fec_get_status after each
      //    read and check if the errors field value has increased.
    }
  }
  LOG(INFO) << "..." << filename->data << " image recovered successfully.";
  return StringValue("t");
}
|
|
|
|
|
2014-08-15 23:31:52 +02:00
|
|
|
void RegisterBlockImageFunctions() {
|
2017-03-31 10:18:13 +02:00
|
|
|
RegisterFunction("block_image_verify", BlockImageVerifyFn);
|
|
|
|
RegisterFunction("block_image_update", BlockImageUpdateFn);
|
|
|
|
RegisterFunction("block_image_recover", BlockImageRecoverFn);
|
|
|
|
RegisterFunction("check_first_block", CheckFirstBlockFn);
|
|
|
|
RegisterFunction("range_sha1", RangeSha1Fn);
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|