/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/fs.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <functional>
#include <limits>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/parseint.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <android-base/unique_fd.h>
#include <applypatch/applypatch.h>
#include <brotli/decode.h>
#include <fec/io.h>
#include <openssl/sha.h>
#include <verity/hash_tree_builder.h>
#include <ziparchive/zip_archive.h>

#include "edify/expr.h"
#include "edify/updater_interface.h"
#include "otautil/dirutil.h"
#include "otautil/error_code.h"
#include "otautil/paths.h"
#include "otautil/print_sha1.h"
#include "otautil/rangeset.h"
#include "private/commands.h"
#include "updater/install.h"

#ifdef __ANDROID__
#include <private/android_filesystem_config.h>

// Set this to 0 to interpret 'erase' transfers to mean do a BLKDISCARD ioctl (the normal
// behavior). Set to 1 to interpret erase to mean fill the region with zeroes.
#define DEBUG_ERASE 0
#else
#define DEBUG_ERASE 1
#define AID_SYSTEM -1
#endif  // __ANDROID__

static constexpr size_t BLOCKSIZE = 4096;
static constexpr mode_t STASH_DIRECTORY_MODE = 0700;
static constexpr mode_t STASH_FILE_MODE = 0600;
static constexpr mode_t MARKER_DIRECTORY_MODE = 0700;

static CauseCode failure_type = kNoCause;
static bool is_retry = false;
static std::unordered_map<std::string, RangeSet> stash_map;

static void DeleteLastCommandFile() {
  const std::string& last_command_file = Paths::Get().last_command_file();
  if (unlink(last_command_file.c_str()) == -1 && errno != ENOENT) {
    PLOG(ERROR) << "Failed to unlink: " << last_command_file;
  }
}

// Parse the last command index of the last update and save the result to |last_command_index|.
// Return true if we successfully read the index.
static bool ParseLastCommandFile(size_t* last_command_index) {
  const std::string& last_command_file = Paths::Get().last_command_file();
  android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(last_command_file.c_str(), O_RDONLY)));
  if (fd == -1) {
    if (errno != ENOENT) {
      PLOG(ERROR) << "Failed to open " << last_command_file;
      return false;
    }

    LOG(INFO) << last_command_file << " doesn't exist.";
    return false;
  }

  // Now that the last_command file exists, parse the last command index of the previous update.
  std::string content;
  if (!android::base::ReadFdToString(fd.get(), &content)) {
    LOG(ERROR) << "Failed to read: " << last_command_file;
    return false;
  }

  std::vector<std::string> lines = android::base::Split(android::base::Trim(content), "\n");
  if (lines.size() != 2) {
    LOG(ERROR) << "Unexpected line count in last command file: " << content;
    return false;
  }

  if (!android::base::ParseUint(lines[0], last_command_index)) {
    LOG(ERROR) << "Failed to parse integer in: " << lines[0];
    return false;
  }

  return true;
}
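
// For reference, the last_command file written by UpdateLastCommandIndex() below contains exactly
// two lines: the command index and the raw command text. A hypothetical instance (values are
// illustrative only) might look like:
//
//   27
//   bsdiff 0 16 <src_hash> <tgt_hash> 2,10,14 4 2,0,4
//
// Only the first line is consumed by ParseLastCommandFile(); the second records the command
// itself.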

static bool FsyncDir(const std::string& dirname) {
  android::base::unique_fd dfd(TEMP_FAILURE_RETRY(open(dirname.c_str(), O_RDONLY | O_DIRECTORY)));
  if (dfd == -1) {
    failure_type = errno == EIO ? kEioFailure : kFileOpenFailure;
    PLOG(ERROR) << "Failed to open " << dirname;
    return false;
  }
  if (fsync(dfd) == -1) {
    failure_type = errno == EIO ? kEioFailure : kFsyncFailure;
    PLOG(ERROR) << "Failed to fsync " << dirname;
    return false;
  }
  return true;
}

// Update the last executed command index in the last_command_file.
static bool UpdateLastCommandIndex(size_t command_index, const std::string& command_string) {
  const std::string& last_command_file = Paths::Get().last_command_file();
  std::string last_command_tmp = last_command_file + ".tmp";
  std::string content = std::to_string(command_index) + "\n" + command_string;
  android::base::unique_fd wfd(
      TEMP_FAILURE_RETRY(open(last_command_tmp.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0660)));
  if (wfd == -1 || !android::base::WriteStringToFd(content, wfd)) {
    PLOG(ERROR) << "Failed to update last command";
    return false;
  }

  if (fsync(wfd) == -1) {
    PLOG(ERROR) << "Failed to fsync " << last_command_tmp;
    return false;
  }

  if (chown(last_command_tmp.c_str(), AID_SYSTEM, AID_SYSTEM) == -1) {
    PLOG(ERROR) << "Failed to change owner for " << last_command_tmp;
    return false;
  }

  if (rename(last_command_tmp.c_str(), last_command_file.c_str()) == -1) {
    PLOG(ERROR) << "Failed to rename " << last_command_tmp;
    return false;
  }

  if (!FsyncDir(android::base::Dirname(last_command_file))) {
    return false;
  }

  return true;
}
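
// Note the crash-safe pattern above: write to a ".tmp" file, fsync() the file, rename() it over
// the final path, then fsync() the parent directory so that the rename itself is durable. A
// reader of last_command_file therefore sees either the complete old contents or the complete
// new ones, never a partial write.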

bool SetUpdatedMarker(const std::string& marker) {
  auto dirname = android::base::Dirname(marker);
  auto res = mkdir(dirname.c_str(), MARKER_DIRECTORY_MODE);
  if (res == -1 && errno != EEXIST) {
    PLOG(ERROR) << "Failed to create directory for marker: " << dirname;
    return false;
  }

  if (!android::base::WriteStringToFile("", marker)) {
    PLOG(ERROR) << "Failed to write to marker file " << marker;
    return false;
  }
  if (!FsyncDir(dirname)) {
    return false;
  }
  LOG(INFO) << "Wrote updated marker to " << marker;
  return true;
}

static bool discard_blocks(int fd, off64_t offset, uint64_t size, bool force = false) {
  // Don't discard blocks unless the update is a retry run or force == true.
  if (!is_retry && !force) {
    return true;
  }

  uint64_t args[2] = { static_cast<uint64_t>(offset), size };
  if (ioctl(fd, BLKDISCARD, &args) == -1) {
    // On devices that don't support BLKDISCARD, ignore the error.
    if (errno == EOPNOTSUPP) {
      return true;
    }
    PLOG(ERROR) << "BLKDISCARD ioctl failed";
    return false;
  }
  return true;
}

static bool check_lseek(int fd, off64_t offset, int whence) {
  off64_t rc = TEMP_FAILURE_RETRY(lseek64(fd, offset, whence));
  if (rc == -1) {
    failure_type = kLseekFailure;
    PLOG(ERROR) << "lseek64 failed";
    return false;
  }
  return true;
}

static void allocate(size_t size, std::vector<uint8_t>* buffer) {
  // If the buffer's big enough, reuse it.
  if (size <= buffer->size()) return;
  buffer->resize(size);
}

/**
 * RangeSinkWriter reads data from the given FD, and writes it to the destination specified by the
 * given RangeSet.
 */
class RangeSinkWriter {
 public:
  RangeSinkWriter(int fd, const RangeSet& tgt)
      : fd_(fd),
        tgt_(tgt),
        next_range_(0),
        current_range_left_(0),
        bytes_written_(0) {
    CHECK_NE(tgt.size(), static_cast<size_t>(0));
  }

  bool Finished() const {
    return next_range_ == tgt_.size() && current_range_left_ == 0;
  }

  size_t AvailableSpace() const {
    return tgt_.blocks() * BLOCKSIZE - bytes_written_;
  }

  // Returns the number of bytes written; 0 indicates a write failure.
  size_t Write(const uint8_t* data, size_t size) {
    if (Finished()) {
      LOG(ERROR) << "range sink write overrun; can't write " << size << " bytes";
      return 0;
    }

    size_t written = 0;
    while (size > 0) {
      // Move to the next range as needed.
      if (!SeekToOutputRange()) {
        break;
      }

      size_t write_now = size;
      if (current_range_left_ < write_now) {
        write_now = current_range_left_;
      }

      if (!android::base::WriteFully(fd_, data, write_now)) {
        failure_type = errno == EIO ? kEioFailure : kFwriteFailure;
        PLOG(ERROR) << "Failed to write " << write_now << " bytes of data";
        break;
      }

      data += write_now;
      size -= write_now;

      current_range_left_ -= write_now;
      written += write_now;
    }

    bytes_written_ += written;
    return written;
  }

  size_t BytesWritten() const {
    return bytes_written_;
  }

 private:
  // Set up the output cursor, move to the next range if needed.
  bool SeekToOutputRange() {
    // We haven't finished the current range yet.
    if (current_range_left_ != 0) {
      return true;
    }
    // We can't write any more; let the write function return how many bytes have been written
    // so far.
    if (next_range_ >= tgt_.size()) {
      return false;
    }

    const Range& range = tgt_[next_range_];
    off64_t offset = static_cast<off64_t>(range.first) * BLOCKSIZE;
    current_range_left_ = (range.second - range.first) * BLOCKSIZE;
    next_range_++;

    if (!discard_blocks(fd_, offset, current_range_left_)) {
      return false;
    }
    if (!check_lseek(fd_, offset, SEEK_SET)) {
      return false;
    }
    return true;
  }

  // The output file descriptor.
  int fd_;
  // The destination ranges for the data.
  const RangeSet& tgt_;
  // The next range that we should write to.
  size_t next_range_;
  // The number of bytes to write before moving to the next range.
  size_t current_range_left_;
  // Total bytes written by the writer.
  size_t bytes_written_;
};
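
// Minimal usage sketch (illustrative only; |fd| and the range string are hypothetical): write two
// blocks of zeroes to target blocks [0, 2):
//
//   RangeSinkWriter writer(fd, RangeSet::Parse("2,0,2"));
//   std::vector<uint8_t> zeroes(2 * BLOCKSIZE);
//   size_t n = writer.Write(zeroes.data(), zeroes.size());
//   CHECK(n == zeroes.size() && writer.Finished());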

/**
 * All of the data for all the 'new' transfers is contained in one file in the update package,
 * concatenated together in the order in which transfers.list will need it. We want to stream it
 * out of the archive (it's compressed) without writing it to a temp file, but we can't write each
 * section until it's that transfer's turn to go.
 *
 * To achieve this, we expand the new data from the archive in a background thread, and block that
 * thread's 'receive uncompressed data' function until the main thread has reached a point where
 * we want some new data to be written. We signal the background thread with the destination for
 * the data and block the main thread, waiting for the background thread to complete writing that
 * section. Then it signals the main thread to wake up and goes back to blocking waiting for a
 * transfer.
 *
 * NewThreadInfo is the struct used to pass information back and forth between the two threads.
 * When the main thread wants some data written, it sets writer to the destination location and
 * signals the condition. When the background thread is done writing, it clears writer and signals
 * the condition again.
 */
struct NewThreadInfo {
  ZipArchiveHandle za;
  ZipEntry64 entry{};
  bool brotli_compressed;

  std::unique_ptr<RangeSinkWriter> writer;
  BrotliDecoderState* brotli_decoder_state;
  bool receiver_available;

  pthread_mutex_t mu;
  pthread_cond_t cv;
};

static bool receive_new_data(const uint8_t* data, size_t size, void* cookie) {
  NewThreadInfo* nti = static_cast<NewThreadInfo*>(cookie);

  while (size > 0) {
    // Wait for nti->writer to be non-null, indicating some of this data is wanted.
    pthread_mutex_lock(&nti->mu);
    while (nti->writer == nullptr) {
      // End the new data receiver if we encounter an error when performing block image update.
      if (!nti->receiver_available) {
        pthread_mutex_unlock(&nti->mu);
        return false;
      }
      pthread_cond_wait(&nti->cv, &nti->mu);
    }
    pthread_mutex_unlock(&nti->mu);

    // At this point nti->writer is set, and we own it. The main thread is waiting for it to
    // disappear from nti.
    size_t write_now = std::min(size, nti->writer->AvailableSpace());
    if (nti->writer->Write(data, write_now) != write_now) {
      LOG(ERROR) << "Failed to write " << write_now << " bytes.";
      return false;
    }

    data += write_now;
    size -= write_now;

    if (nti->writer->Finished()) {
      // We have written all the bytes desired by this writer.

      pthread_mutex_lock(&nti->mu);
      nti->writer = nullptr;
      pthread_cond_broadcast(&nti->cv);
      pthread_mutex_unlock(&nti->mu);
    }
  }

  return true;
}

static bool receive_brotli_new_data(const uint8_t* data, size_t size, void* cookie) {
  NewThreadInfo* nti = static_cast<NewThreadInfo*>(cookie);

  while (size > 0 || BrotliDecoderHasMoreOutput(nti->brotli_decoder_state)) {
    // Wait for nti->writer to be non-null, indicating some of this data is wanted.
    pthread_mutex_lock(&nti->mu);
    while (nti->writer == nullptr) {
      // End the receiver if we encounter an error when performing block image update.
      if (!nti->receiver_available) {
        pthread_mutex_unlock(&nti->mu);
        return false;
      }
      pthread_cond_wait(&nti->cv, &nti->mu);
    }
    pthread_mutex_unlock(&nti->mu);

    // At this point nti->writer is set, and we own it. The main thread is waiting for it to
    // disappear from nti.

    size_t buffer_size = std::min<size_t>(32768, nti->writer->AvailableSpace());
    if (buffer_size == 0) {
      LOG(ERROR) << "No space left in output range";
      return false;
    }
    uint8_t buffer[buffer_size];
    size_t available_in = size;
    size_t available_out = buffer_size;
    uint8_t* next_out = buffer;

    // The brotli decoder will update |data|, |available_in|, |next_out| and |available_out|.
    BrotliDecoderResult result = BrotliDecoderDecompressStream(
        nti->brotli_decoder_state, &available_in, &data, &available_out, &next_out, nullptr);

    if (result == BROTLI_DECODER_RESULT_ERROR) {
      LOG(ERROR) << "Decompression failed with "
                 << BrotliDecoderErrorString(BrotliDecoderGetErrorCode(nti->brotli_decoder_state));
      return false;
    }

    LOG(DEBUG) << "bytes to write: " << buffer_size - available_out << ", bytes consumed "
               << size - available_in << ", decoder status " << result;

    size_t write_now = buffer_size - available_out;
    if (nti->writer->Write(buffer, write_now) != write_now) {
      LOG(ERROR) << "Failed to write " << write_now << " bytes.";
      return false;
    }

    // Update the remaining size. The input data pointer has already been advanced by the brotli
    // decoder.
    size = available_in;

    if (nti->writer->Finished()) {
      // We have written all the bytes desired by this writer.

      pthread_mutex_lock(&nti->mu);
      nti->writer = nullptr;
      pthread_cond_broadcast(&nti->cv);
      pthread_mutex_unlock(&nti->mu);
    }
  }

  return true;
}

static void* unzip_new_data(void* cookie) {
  NewThreadInfo* nti = static_cast<NewThreadInfo*>(cookie);
  if (nti->brotli_compressed) {
    ProcessZipEntryContents(nti->za, &nti->entry, receive_brotli_new_data, nti);
  } else {
    ProcessZipEntryContents(nti->za, &nti->entry, receive_new_data, nti);
  }
  pthread_mutex_lock(&nti->mu);
  nti->receiver_available = false;
  if (nti->writer != nullptr) {
    pthread_cond_broadcast(&nti->cv);
  }
  pthread_mutex_unlock(&nti->mu);
  return nullptr;
}
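
// unzip_new_data() is the background thread's entry point; its signature matches what
// pthread_create() expects, and CommandParameters below holds the corresponding pthread_t. On
// exit it clears receiver_available and, if the main thread is still waiting on a writer, wakes
// it so that it doesn't block forever after the data stream ends or fails.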

static int ReadBlocks(const RangeSet& src, std::vector<uint8_t>* buffer, int fd) {
  size_t p = 0;
  for (const auto& [begin, end] : src) {
    if (!check_lseek(fd, static_cast<off64_t>(begin) * BLOCKSIZE, SEEK_SET)) {
      return -1;
    }

    size_t size = (end - begin) * BLOCKSIZE;
    if (!android::base::ReadFully(fd, buffer->data() + p, size)) {
      failure_type = errno == EIO ? kEioFailure : kFreadFailure;
      PLOG(ERROR) << "Failed to read " << size << " bytes of data";
      return -1;
    }

    p += size;
  }

  return 0;
}

static int WriteBlocks(const RangeSet& tgt, const std::vector<uint8_t>& buffer, int fd) {
  size_t written = 0;
  for (const auto& [begin, end] : tgt) {
    off64_t offset = static_cast<off64_t>(begin) * BLOCKSIZE;
    size_t size = (end - begin) * BLOCKSIZE;
    if (!discard_blocks(fd, offset, size)) {
      return -1;
    }

    if (!check_lseek(fd, offset, SEEK_SET)) {
      return -1;
    }

    if (!android::base::WriteFully(fd, buffer.data() + written, size)) {
      failure_type = errno == EIO ? kEioFailure : kFwriteFailure;
      PLOG(ERROR) << "Failed to write " << size << " bytes of data";
      return -1;
    }

    written += size;
  }

  return 0;
}
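
// Illustrative round-trip (hypothetical |fd| and ranges) -- essentially what a "move" command
// does: read the source ranges into a packed buffer, then write that buffer out to the target
// ranges:
//
//   RangeSet src = RangeSet::Parse("2,10,14");
//   RangeSet tgt = RangeSet::Parse("2,0,4");
//   std::vector<uint8_t> buf(src.blocks() * BLOCKSIZE);
//   if (ReadBlocks(src, &buf, fd) == 0) {
//     WriteBlocks(tgt, buf, fd);
//   }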

// Parameters for transfer list command functions.
struct CommandParameters {
  std::vector<std::string> tokens;
  size_t cpos;
  std::string cmdname;
  std::string cmdline;
  std::string freestash;
  std::string stashbase;
  bool canwrite;
  int createdstash;
  android::base::unique_fd fd;
  bool foundwrites;
  bool isunresumable;
  int version;
  size_t written;
  size_t stashed;
  NewThreadInfo nti;
  pthread_t thread;
  std::vector<uint8_t> buffer;
  uint8_t* patch_start;
  bool target_verified;  // The target blocks have expected contents already.
};

// Print the hash in hex for corrupted source blocks (excluding the stashed blocks, which are
// handled separately).
static void PrintHashForCorruptedSourceBlocks(const CommandParameters& params,
                                              const std::vector<uint8_t>& buffer) {
  LOG(INFO) << "unexpected contents of source blocks in cmd:\n" << params.cmdline;
  CHECK(params.tokens[0] == "move" || params.tokens[0] == "bsdiff" ||
        params.tokens[0] == "imgdiff");

  size_t pos = 0;
  // Command example:
  // move <onehash> <tgt_range> <src_blk_count> <src_range> [<loc_range> <stashed_blocks>]
  // bsdiff <offset> <len> <src_hash> <tgt_hash> <tgt_range> <src_blk_count> <src_range>
  //   [<loc_range> <stashed_blocks>]
  if (params.tokens[0] == "move") {
    // src_range for move starts at the 4th position.
    if (params.tokens.size() < 5) {
      LOG(ERROR) << "failed to parse source range in cmd:\n" << params.cmdline;
      return;
    }
    pos = 4;
  } else {
    // src_range for diff starts at the 7th position.
    if (params.tokens.size() < 8) {
      LOG(ERROR) << "failed to parse source range in cmd:\n" << params.cmdline;
      return;
    }
    pos = 7;
  }

  // Source blocks in stash only, no work to do.
  if (params.tokens[pos] == "-") {
    return;
  }

  RangeSet src = RangeSet::Parse(params.tokens[pos++]);
  if (!src) {
    LOG(ERROR) << "Failed to parse range in " << params.cmdline;
    return;
  }

  RangeSet locs;
  // If there are no stashed blocks, the content in the buffer is consecutive and has the same
  // order as the source blocks.
  if (pos == params.tokens.size()) {
    locs = RangeSet(std::vector<Range>{ Range{ 0, src.blocks() } });
  } else {
    // Otherwise, the next token is the offset of the source blocks in the target range.
    // Example: for the tokens <4,63946,63947,63948,63979> <4,6,7,8,39> <stashed_blocks>;
    // we want to print the SHA-1 for the data in buffer[6], buffer[8], buffer[9] ... buffer[38];
    // this corresponds to the 32 src blocks #63946, #63948, #63949 ... #63978.
    locs = RangeSet::Parse(params.tokens[pos++]);
    CHECK_EQ(src.blocks(), locs.blocks());
  }

  LOG(INFO) << "printing hash in hex for " << src.blocks() << " source blocks";
  for (size_t i = 0; i < src.blocks(); i++) {
    size_t block_num = src.GetBlockNumber(i);
    size_t buffer_index = locs.GetBlockNumber(i);
    CHECK_LE((buffer_index + 1) * BLOCKSIZE, buffer.size());

    uint8_t digest[SHA_DIGEST_LENGTH];
    SHA1(buffer.data() + buffer_index * BLOCKSIZE, BLOCKSIZE, digest);
    std::string hexdigest = print_sha1(digest);
    LOG(INFO) << "  block number: " << block_num << ", SHA-1: " << hexdigest;
  }
}

// If the calculated hash for the whole stash doesn't match the stash id, print the SHA-1
// in hex for each block.
static void PrintHashForCorruptedStashedBlocks(const std::string& id,
                                               const std::vector<uint8_t>& buffer,
                                               const RangeSet& src) {
  LOG(INFO) << "printing hash in hex for stash_id: " << id;
  CHECK_EQ(src.blocks() * BLOCKSIZE, buffer.size());

  for (size_t i = 0; i < src.blocks(); i++) {
    size_t block_num = src.GetBlockNumber(i);

    uint8_t digest[SHA_DIGEST_LENGTH];
    SHA1(buffer.data() + i * BLOCKSIZE, BLOCKSIZE, digest);
    std::string hexdigest = print_sha1(digest);
    LOG(INFO) << "  block number: " << block_num << ", SHA-1: " << hexdigest;
  }
}

// If the stash file doesn't exist, read the source blocks this stash contains and print the
// SHA-1 for these blocks.
static void PrintHashForMissingStashedBlocks(const std::string& id, int fd) {
  if (stash_map.find(id) == stash_map.end()) {
    LOG(ERROR) << "No stash saved for id: " << id;
    return;
  }

  LOG(INFO) << "print hash in hex for source blocks in missing stash: " << id;
  const RangeSet& src = stash_map[id];
  std::vector<uint8_t> buffer(src.blocks() * BLOCKSIZE);
  if (ReadBlocks(src, &buffer, fd) == -1) {
    LOG(ERROR) << "failed to read source blocks for stash: " << id;
    return;
  }
  PrintHashForCorruptedStashedBlocks(id, buffer, src);
}

static int VerifyBlocks(const std::string& expected, const std::vector<uint8_t>& buffer,
                        const size_t blocks, bool printerror) {
  uint8_t digest[SHA_DIGEST_LENGTH];
  const uint8_t* data = buffer.data();

  SHA1(data, blocks * BLOCKSIZE, digest);

  std::string hexdigest = print_sha1(digest);

  if (hexdigest != expected) {
    if (printerror) {
      LOG(ERROR) << "failed to verify blocks (expected " << expected << ", read " << hexdigest
                 << ")";
    }
    return -1;
  }

  return 0;
}
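
// Note: VerifyBlocks() compares against the hex SHA-1 digest of the data. Stash ids are exactly
// such digests, which is why LoadStash() below can pass the stash id itself as the expected hash,
// and why WriteStash() can skip rewriting a file whose hash-derived name already exists.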

static std::string GetStashFileName(const std::string& base, const std::string& id,
                                    const std::string& postfix) {
  if (base.empty()) {
    return "";
  }
  std::string filename = Paths::Get().stash_directory_base() + "/" + base;
  if (id.empty() && postfix.empty()) {
    return filename;
  }
  return filename + "/" + id + postfix;
}
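
// For illustration, with a hypothetical stash base directory of "/cache/recovery":
//   GetStashFileName("abc123", "", "") -> "/cache/recovery/abc123"
//   GetStashFileName("abc123", "<id>", ".partial") -> "/cache/recovery/abc123/<id>.partial"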

// Does a best effort enumeration of stash files. Ignores possible non-file items in the stash
// directory and continues despite errors. Calls the 'callback' function for each file.
static void EnumerateStash(const std::string& dirname,
                           const std::function<void(const std::string&)>& callback) {
  if (dirname.empty()) return;

  std::unique_ptr<DIR, decltype(&closedir)> directory(opendir(dirname.c_str()), closedir);

  if (directory == nullptr) {
    if (errno != ENOENT) {
      PLOG(ERROR) << "opendir \"" << dirname << "\" failed";
    }
    return;
  }

  dirent* item;
  while ((item = readdir(directory.get())) != nullptr) {
    if (item->d_type != DT_REG) continue;
    callback(dirname + "/" + item->d_name);
  }
}

static void DeleteFile(const std::string& fn) {
  if (fn.empty()) return;

  LOG(INFO) << "deleting " << fn;

  if (unlink(fn.c_str()) == -1 && errno != ENOENT) {
    PLOG(ERROR) << "unlink \"" << fn << "\" failed";
  }
}

// Deletes the stash directory and all files in it. Assumes that it only contains files. There is
// nothing we can do about unlikely, but possible, errors, so they are merely logged.
static void DeleteStash(const std::string& base) {
  if (base.empty()) return;

  LOG(INFO) << "deleting stash " << base;

  std::string dirname = GetStashFileName(base, "", "");
  EnumerateStash(dirname, DeleteFile);

  if (rmdir(dirname.c_str()) == -1) {
    if (errno != ENOENT && errno != ENOTDIR) {
      PLOG(ERROR) << "rmdir \"" << dirname << "\" failed";
    }
  }
}

static int LoadStash(const CommandParameters& params, const std::string& id, bool verify,
                     std::vector<uint8_t>* buffer, bool printnoent) {
  // In verify mode, if source range_set was saved for the given hash, check contents in the
  // source blocks first. If the check fails, search for the stashed files on /cache as usual.
  if (!params.canwrite) {
    if (stash_map.find(id) != stash_map.end()) {
      const RangeSet& src = stash_map[id];
      allocate(src.blocks() * BLOCKSIZE, buffer);

      if (ReadBlocks(src, buffer, params.fd) == -1) {
        LOG(ERROR) << "failed to read source blocks in stash map.";
        return -1;
      }
      if (VerifyBlocks(id, *buffer, src.blocks(), true) != 0) {
        LOG(ERROR) << "failed to verify loaded source blocks in stash map.";
        if (!is_retry) {
          PrintHashForCorruptedStashedBlocks(id, *buffer, src);
        }
        return -1;
      }
      return 0;
    }
  }

  std::string fn = GetStashFileName(params.stashbase, id, "");

  struct stat sb;
  if (stat(fn.c_str(), &sb) == -1) {
    if (errno != ENOENT || printnoent) {
      PLOG(ERROR) << "stat \"" << fn << "\" failed";
      PrintHashForMissingStashedBlocks(id, params.fd);
    }
    return -1;
  }

  LOG(INFO) << " loading " << fn;

  if ((sb.st_size % BLOCKSIZE) != 0) {
    LOG(ERROR) << fn << " size " << sb.st_size << " not multiple of block size " << BLOCKSIZE;
    return -1;
  }

  android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(fn.c_str(), O_RDONLY)));
  if (fd == -1) {
    failure_type = errno == EIO ? kEioFailure : kFileOpenFailure;
    PLOG(ERROR) << "open \"" << fn << "\" failed";
    return -1;
  }

  allocate(sb.st_size, buffer);

  if (!android::base::ReadFully(fd, buffer->data(), sb.st_size)) {
    failure_type = errno == EIO ? kEioFailure : kFreadFailure;
    PLOG(ERROR) << "Failed to read " << sb.st_size << " bytes of data";
    return -1;
  }

  size_t blocks = sb.st_size / BLOCKSIZE;
  if (verify && VerifyBlocks(id, *buffer, blocks, true) != 0) {
    LOG(ERROR) << "unexpected contents in " << fn;
    if (stash_map.find(id) == stash_map.end()) {
      LOG(ERROR) << "failed to find source blocks number for stash " << id
                 << " when executing command: " << params.cmdname;
    } else {
      const RangeSet& src = stash_map[id];
      PrintHashForCorruptedStashedBlocks(id, *buffer, src);
    }
    DeleteFile(fn);
    return -1;
  }

  return 0;
}
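
// Note: in verify mode (params.canwrite is false) nothing has been stashed to /cache yet, so the
// fast path above re-reads the ranges recorded in stash_map straight from the source image and
// checks them against the id/hash, falling back to the on-disk stash file only when the id isn't
// present in stash_map.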

static int WriteStash(const std::string& base, const std::string& id, int blocks,
                      const std::vector<uint8_t>& buffer, bool checkspace, bool* exists) {
  if (base.empty()) {
    return -1;
  }

  if (checkspace && !CheckAndFreeSpaceOnCache(blocks * BLOCKSIZE)) {
    LOG(ERROR) << "not enough space to write stash";
    return -1;
  }

  std::string fn = GetStashFileName(base, id, ".partial");
  std::string cn = GetStashFileName(base, id, "");

  if (exists) {
    struct stat sb;
    int res = stat(cn.c_str(), &sb);

    if (res == 0) {
      // The file already exists and since the name is the hash of the contents,
      // it's safe to assume the contents are identical (accidental hash collisions
      // are unlikely).
      LOG(INFO) << " skipping " << blocks << " existing blocks in " << cn;
      *exists = true;
      return 0;
    }

    *exists = false;
  }

  LOG(INFO) << " writing " << blocks << " blocks to " << cn;

  android::base::unique_fd fd(
      TEMP_FAILURE_RETRY(open(fn.c_str(), O_WRONLY | O_CREAT | O_TRUNC, STASH_FILE_MODE)));
  if (fd == -1) {
    failure_type = errno == EIO ? kEioFailure : kFileOpenFailure;
    PLOG(ERROR) << "failed to create \"" << fn << "\"";
    return -1;
  }

  if (fchown(fd, AID_SYSTEM, AID_SYSTEM) != 0) {  // system user
    PLOG(ERROR) << "failed to chown \"" << fn << "\"";
    return -1;
  }

  if (!android::base::WriteFully(fd, buffer.data(), blocks * BLOCKSIZE)) {
    failure_type = errno == EIO ? kEioFailure : kFwriteFailure;
    PLOG(ERROR) << "Failed to write " << blocks * BLOCKSIZE << " bytes of data";
    return -1;
  }

  if (fsync(fd) == -1) {
    failure_type = errno == EIO ? kEioFailure : kFsyncFailure;
    PLOG(ERROR) << "fsync \"" << fn << "\" failed";
    return -1;
  }

  if (rename(fn.c_str(), cn.c_str()) == -1) {
    PLOG(ERROR) << "rename(\"" << fn << "\", \"" << cn << "\") failed";
    return -1;
  }

  std::string dname = GetStashFileName(base, "", "");
  if (!FsyncDir(dname)) {
    return -1;
  }

  return 0;
}
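
// WriteStash() follows the same durable-write pattern as UpdateLastCommandIndex(): the data lands
// in a ".partial" file first and is renamed to its final, hash-named path only after a successful
// fsync(), so an interrupted update never leaves a truncated file under a valid stash id.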
|
|
|
// Creates a directory for storing stash files and checks if the /cache partition
|
|
|
|
// hash enough space for the expected amount of blocks we need to store. Returns
|
|
|
|
// >0 if we created the directory, zero if it existed already, and <0 of failure.
|
updater: Skip an updated partition on retry.
Prior to the change, the BBOTA updater would try to re-run all the
commands for a given partition on retry, including creating stashes
according to the list of commands. This could fail a retry when the
previous update had moved on to next stage, with leftovers in /cache.
This CL creates a marker on /cache upon successfully updating a
partition. The update commands will be skipped when trying to apply
updates on an updated partition. Note that the marker is expected to be
removed while doing a normal boot (in particular, handled by
RecoverySystem#handleAftermath). If that didn't happen, the updater
would also remove the marker before starting next fresh update.
Alternatively, we can achieve the same goal by changing the OTA script,
which needs to additionally compare the checksum against the target
build. For example,
range_sha1("/system", "ranges") == SHA1_of_updated_system ||
block_image_update("/system");
The downside is that we need to pay that cost on each install, as the
edify script doesn't support caching the result in a variable.
Bug: 79165963
Test: Simulate the process on device (by triggering a reboot while
updating /vendor). Check the update log and result.
Change-Id: I731031fa336133e1221b33edfc469969706e8091
2018-05-07 20:38:25 +02:00
|
|
|
static int CreateStash(State* state, size_t maxblocks, const std::string& base) {
  std::string dirname = GetStashFileName(base, "", "");
  struct stat sb;
  int res = stat(dirname.c_str(), &sb);
  if (res == -1 && errno != ENOENT) {
    ErrorAbort(state, kStashCreationFailure, "stat \"%s\" failed: %s", dirname.c_str(),
               strerror(errno));
    return -1;
  }

  size_t max_stash_size = maxblocks * BLOCKSIZE;

  if (res == -1) {
    LOG(INFO) << "creating stash " << dirname;
    res = mkdir_recursively(dirname, STASH_DIRECTORY_MODE, false, nullptr);

    if (res != 0) {
      ErrorAbort(state, kStashCreationFailure, "mkdir \"%s\" failed: %s", dirname.c_str(),
                 strerror(errno));
      return -1;
    }

    if (chown(dirname.c_str(), AID_SYSTEM, AID_SYSTEM) != 0) {  // system user
      ErrorAbort(state, kStashCreationFailure, "chown \"%s\" failed: %s", dirname.c_str(),
                 strerror(errno));
      return -1;
    }

    if (!CheckAndFreeSpaceOnCache(max_stash_size)) {
      ErrorAbort(state, kStashCreationFailure, "not enough space for stash (%zu needed)",
                 max_stash_size);
      return -1;
    }

    return 1;  // Created directory
  }

  LOG(INFO) << "using existing stash " << dirname;

  // If the directory already exists, calculate the space already allocated to stash files and check
  // if there's enough for all required blocks. Delete any partially completed stash files first.
  EnumerateStash(dirname, [](const std::string& fn) {
    if (android::base::EndsWith(fn, ".partial")) {
      DeleteFile(fn);
    }
  });

  size_t existing = 0;
  EnumerateStash(dirname, [&existing](const std::string& fn) {
    if (fn.empty()) return;
    struct stat sb;
    if (stat(fn.c_str(), &sb) == -1) {
      PLOG(ERROR) << "stat \"" << fn << "\" failed";
      return;
    }
    existing += static_cast<size_t>(sb.st_size);
  });

  if (max_stash_size > existing) {
    size_t needed = max_stash_size - existing;
    if (!CheckAndFreeSpaceOnCache(needed)) {
      ErrorAbort(state, kStashCreationFailure, "not enough space for stash (%zu more needed)",
                 needed);
      return -1;
    }
  }

  return 0;  // Using existing directory
}

static int FreeStash(const std::string& base, const std::string& id) {
  if (base.empty() || id.empty()) {
    return -1;
  }

  DeleteFile(GetStashFileName(base, id, ""));

  return 0;
}

// Source contains packed data, which we want to move to the locations given in locs in the dest
// buffer. source and dest may be the same buffer.
static void MoveRange(std::vector<uint8_t>& dest, const RangeSet& locs,
                      const std::vector<uint8_t>& source) {
  const uint8_t* from = source.data();
  uint8_t* to = dest.data();
  size_t start = locs.blocks();
  // Must do the movement backward.
  for (auto it = locs.crbegin(); it != locs.crend(); it++) {
    size_t blocks = it->second - it->first;
    start -= blocks;
    memmove(to + (it->first * BLOCKSIZE), from + (start * BLOCKSIZE), blocks * BLOCKSIZE);
  }
}
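
// Worked illustration (not part of the original source): if locs is "4,1,2,4,5", i.e. the two
// single-block ranges [1,2) and [4,5), and source holds the packed blocks [A B], iterating in
// reverse copies B into block 4 first, then A into block 1. When source and dest are the same
// buffer, moving backward keeps a destination write from clobbering packed data that has not
// been relocated yet.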

/**
 * We expect to parse the remainder of the parameter tokens as one of:
 *
 *    <src_block_count> <src_range>
 *        (loads data from source image only)
 *
 *    <src_block_count> - <[stash_id:stash_range] ...>
 *        (loads data from stashes only)
 *
 *    <src_block_count> <src_range> <src_loc> <[stash_id:stash_range] ...>
 *        (loads data from both source image and stashes)
 *
 * On return, params.buffer is filled with the loaded source data (rearranged and combined with
 * stashed data as necessary). buffer may be reallocated if needed to accommodate the source data.
 * tgt is the target RangeSet for detecting overlaps. Any stashes required are loaded using
 * LoadStash.
 */
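
// Illustrative examples (token values are hypothetical): "4 2,545,549" loads the four source
// blocks [545,549) straight into params.buffer; "4 - 9eae0a...:2,0,4" instead fills buffer
// blocks [0,4) from the stash with that id; "4 2,545,547 2,0,2 9eae0a...:2,2,4" combines two
// blocks read from the source image with two blocks taken from a stash.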
static int LoadSourceBlocks(CommandParameters& params, const RangeSet& tgt, size_t* src_blocks,
                            bool* overlap) {
  CHECK(src_blocks != nullptr);
  CHECK(overlap != nullptr);

  // <src_block_count>
  const std::string& token = params.tokens[params.cpos++];
  if (!android::base::ParseUint(token, src_blocks)) {
    LOG(ERROR) << "invalid src_block_count \"" << token << "\"";
    return -1;
  }

  allocate(*src_blocks * BLOCKSIZE, &params.buffer);

  // "-" or <src_range> [<src_loc>]
  if (params.tokens[params.cpos] == "-") {
    // no source ranges, only stashes
    params.cpos++;
  } else {
    RangeSet src = RangeSet::Parse(params.tokens[params.cpos++]);
    CHECK(static_cast<bool>(src));
    *overlap = src.Overlaps(tgt);

    if (ReadBlocks(src, &params.buffer, params.fd) == -1) {
      return -1;
    }

    if (params.cpos >= params.tokens.size()) {
      // no stashes, only source range
      return 0;
    }

    RangeSet locs = RangeSet::Parse(params.tokens[params.cpos++]);
    CHECK(static_cast<bool>(locs));
    MoveRange(params.buffer, locs, params.buffer);
  }

  // <[stash_id:stash_range]>
  while (params.cpos < params.tokens.size()) {
    // Each word is an index into the stash table, a colon, and then a RangeSet describing where
    // in the source block that stashed data should go.
    std::vector<std::string> tokens = android::base::Split(params.tokens[params.cpos++], ":");
    if (tokens.size() != 2) {
      LOG(ERROR) << "invalid parameter";
      return -1;
    }

    std::vector<uint8_t> stash;
    if (LoadStash(params, tokens[0], false, &stash, true) == -1) {
      // These source blocks will fail verification if used later, but we
      // will let the caller decide if this is a fatal failure
      LOG(ERROR) << "failed to load stash " << tokens[0];
      continue;
    }

    RangeSet locs = RangeSet::Parse(tokens[1]);
    CHECK(static_cast<bool>(locs));
    MoveRange(params.buffer, locs, stash);
  }

  return 0;
}

/**
 * Do a source/target load for move/bsdiff/imgdiff in version 3.
 *
 * We expect to parse the remainder of the parameter tokens as one of:
 *
 *    <tgt_range> <src_block_count> <src_range>
 *        (loads data from source image only)
 *
 *    <tgt_range> <src_block_count> - <[stash_id:stash_range] ...>
 *        (loads data from stashes only)
 *
 *    <tgt_range> <src_block_count> <src_range> <src_loc> <[stash_id:stash_range] ...>
 *        (loads data from both source image and stashes)
 *
 * 'onehash' tells whether to expect separate source and target block hashes, or if they are both
 * the same and only one hash should be expected. params.isunresumable will be set to true if block
 * verification fails in a way that the update cannot be resumed anymore.
 *
 * If the function is unable to load the necessary blocks or their contents don't match the hashes,
 * the return value is -1 and the command should be aborted.
 *
 * If the return value is 1, the command has already been completed according to the contents of the
 * target blocks, and should not be performed again.
 *
 * If the return value is 0, source blocks have expected content and the command can be performed.
 */
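
// Illustrative example (hash and ranges hypothetical): for a "move" command the remaining tokens
// might be "a6cbe0... 2,545,546 1 2,544,545": a single hash (onehash == true), target range
// [545,546), and one source block at [544,545). A "bsdiff"/"imgdiff" command carries two hashes
// (source first, then target) before the same range tokens.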
static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet* tgt, size_t* src_blocks,
                              bool onehash) {
  CHECK(src_blocks != nullptr);

  if (params.cpos >= params.tokens.size()) {
    LOG(ERROR) << "missing source hash";
    return -1;
  }

  std::string srchash = params.tokens[params.cpos++];
  std::string tgthash;

  if (onehash) {
    tgthash = srchash;
  } else {
    if (params.cpos >= params.tokens.size()) {
      LOG(ERROR) << "missing target hash";
      return -1;
    }
    tgthash = params.tokens[params.cpos++];
  }

  // The command needs to provide at least three more parameters: <tgt_range>, <src_block_count>
  // and "-"/<src_range>.
  if (params.cpos + 2 >= params.tokens.size()) {
    LOG(ERROR) << "invalid parameters";
    return -1;
  }

  // <tgt_range>
  *tgt = RangeSet::Parse(params.tokens[params.cpos++]);
  CHECK(static_cast<bool>(*tgt));

  std::vector<uint8_t> tgtbuffer(tgt->blocks() * BLOCKSIZE);
  if (ReadBlocks(*tgt, &tgtbuffer, params.fd) == -1) {
    return -1;
  }

  // Return now if target blocks already have expected content.
  if (VerifyBlocks(tgthash, tgtbuffer, tgt->blocks(), false) == 0) {
    return 1;
  }

  // Load source blocks.
  bool overlap = false;
  if (LoadSourceBlocks(params, *tgt, src_blocks, &overlap) == -1) {
    return -1;
  }

  if (VerifyBlocks(srchash, params.buffer, *src_blocks, true) == 0) {
    // If source and target blocks overlap, stash the source blocks so we can resume from possible
    // write errors. In verify mode, we can skip stashing because the source blocks won't be
    // overwritten.
    if (overlap && params.canwrite) {
      LOG(INFO) << "stashing " << *src_blocks << " overlapping blocks to " << srchash;

      bool stash_exists = false;
      if (WriteStash(params.stashbase, srchash, *src_blocks, params.buffer, true,
                     &stash_exists) != 0) {
        LOG(ERROR) << "failed to stash overlapping source blocks";
        return -1;
      }

      params.stashed += *src_blocks;
      // Can be deleted when the write has completed.
      if (!stash_exists) {
        params.freestash = srchash;
      }
    }

    // Source blocks have expected content, command can proceed.
    return 0;
  }

  if (overlap && LoadStash(params, srchash, true, &params.buffer, true) == 0) {
    // Overlapping source blocks were previously stashed, command can proceed. We are recovering
    // from an interrupted command, so we don't know if the stash can safely be deleted after this
    // command.
    return 0;
  }

  // Valid source data not available, update cannot be resumed.
  LOG(ERROR) << "partition has unexpected contents";
  PrintHashForCorruptedSourceBlocks(params, params.buffer);

  params.isunresumable = true;

  return -1;
}

static int PerformCommandMove(CommandParameters& params) {
  size_t blocks = 0;
  RangeSet tgt;
  int status = LoadSrcTgtVersion3(params, &tgt, &blocks, true);

  if (status == -1) {
    LOG(ERROR) << "failed to read blocks for move";
    return -1;
  }

  if (status == 0) {
    params.foundwrites = true;
  } else {
    params.target_verified = true;
    if (params.foundwrites) {
      LOG(WARNING) << "warning: commands executed out of order [" << params.cmdname << "]";
    }
  }

  if (params.canwrite) {
    if (status == 0) {
      LOG(INFO) << "  moving " << blocks << " blocks";

      if (WriteBlocks(tgt, params.buffer, params.fd) == -1) {
        return -1;
      }
    } else {
      LOG(INFO) << "skipping " << blocks << " already moved blocks";
    }
  }

  if (!params.freestash.empty()) {
    FreeStash(params.stashbase, params.freestash);
    params.freestash.clear();
  }

  params.written += tgt.blocks();

  return 0;
}
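
// For reference (id and range hypothetical): a transfer-list line such as
// "stash 9eae0a... 2,544,545" saves source blocks [544,545) under the id 9eae0a..., which is
// also the expected SHA-1 of that data; PerformCommandStash below implements it.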

static int PerformCommandStash(CommandParameters& params) {
  // <stash_id> <src_range>
  if (params.cpos + 1 >= params.tokens.size()) {
    LOG(ERROR) << "missing id and/or src range fields in stash command";
    return -1;
  }

  const std::string& id = params.tokens[params.cpos++];
  if (LoadStash(params, id, true, &params.buffer, false) == 0) {
    // Stash file already exists and has expected contents. Do not read from source again, as the
    // source may have been already overwritten during a previous attempt.
    return 0;
  }

  RangeSet src = RangeSet::Parse(params.tokens[params.cpos++]);
  CHECK(static_cast<bool>(src));

  size_t blocks = src.blocks();
  allocate(blocks * BLOCKSIZE, &params.buffer);
  if (ReadBlocks(src, &params.buffer, params.fd) == -1) {
    return -1;
  }
  stash_map[id] = src;

  if (VerifyBlocks(id, params.buffer, blocks, true) != 0) {
    // Source blocks have unexpected contents. If we actually need this data later, this is an
    // unrecoverable error. However, the command that uses the data may have already completed
    // previously, so the possible failure will occur during source block verification.
    LOG(ERROR) << "failed to load source blocks for stash " << id;
    return 0;
  }

  // In verify mode, we don't need to stash any blocks.
  if (!params.canwrite) {
    return 0;
  }

  LOG(INFO) << "stashing " << blocks << " blocks to " << id;
  int result = WriteStash(params.stashbase, id, blocks, params.buffer, false, nullptr);
  if (result == 0) {
    params.stashed += blocks;
  }
  return result;
}
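
// For reference (id hypothetical): "free 9eae0a..." drops the stash entry with that id once no
// later command needs it; PerformCommandFree below implements it.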

static int PerformCommandFree(CommandParameters& params) {
  // <stash_id>
  if (params.cpos >= params.tokens.size()) {
    LOG(ERROR) << "missing stash id in free command";
    return -1;
  }

  const std::string& id = params.tokens[params.cpos++];
  stash_map.erase(id);

  if (params.createdstash || params.canwrite) {
    return FreeStash(params.stashbase, id);
  }

  return 0;
}
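
// For reference (range hypothetical): "zero 2,0,16" zeroes blocks [0,16). Note that "erase"
// commands are routed here as well when DEBUG_ERASE is set, which is why the written-block
// counter below is only bumped when the command name is actually "zero".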

static int PerformCommandZero(CommandParameters& params) {
  if (params.cpos >= params.tokens.size()) {
    LOG(ERROR) << "missing target blocks for zero";
    return -1;
  }

  RangeSet tgt = RangeSet::Parse(params.tokens[params.cpos++]);
  CHECK(static_cast<bool>(tgt));

  LOG(INFO) << "  zeroing " << tgt.blocks() << " blocks";

  allocate(BLOCKSIZE, &params.buffer);
  memset(params.buffer.data(), 0, BLOCKSIZE);

  if (params.canwrite) {
    for (const auto& [begin, end] : tgt) {
      off64_t offset = static_cast<off64_t>(begin) * BLOCKSIZE;
      size_t size = (end - begin) * BLOCKSIZE;
      if (!discard_blocks(params.fd, offset, size)) {
        return -1;
      }

      if (!check_lseek(params.fd, offset, SEEK_SET)) {
        return -1;
      }

      for (size_t j = begin; j < end; ++j) {
        if (!android::base::WriteFully(params.fd, params.buffer.data(), BLOCKSIZE)) {
          failure_type = errno == EIO ? kEioFailure : kFwriteFailure;
          PLOG(ERROR) << "Failed to write " << BLOCKSIZE << " bytes of data";
          return -1;
        }
      }
    }
  }

  if (params.cmdname[0] == 'z') {
    // Update only for the zero command, as the erase command will call
    // this if DEBUG_ERASE is defined.
    params.written += tgt.blocks();
  }

  return 0;
}
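
// For reference (range hypothetical): "new 2,0,16" fills blocks [0,16) from the package's new
// data stream (e.g. system.new.dat, optionally brotli-compressed); the payload is fed by the
// unzip_new_data thread rather than carried on the command line itself.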

static int PerformCommandNew(CommandParameters& params) {
  if (params.cpos >= params.tokens.size()) {
    LOG(ERROR) << "missing target blocks for new";
    return -1;
  }

  RangeSet tgt = RangeSet::Parse(params.tokens[params.cpos++]);
  CHECK(static_cast<bool>(tgt));

  if (params.canwrite) {
    LOG(INFO) << " writing " << tgt.blocks() << " blocks of new data";

    pthread_mutex_lock(&params.nti.mu);
    params.nti.writer = std::make_unique<RangeSinkWriter>(params.fd, tgt);
    pthread_cond_broadcast(&params.nti.cv);

    while (params.nti.writer != nullptr) {
      if (!params.nti.receiver_available) {
        LOG(ERROR) << "missing " << (tgt.blocks() * BLOCKSIZE - params.nti.writer->BytesWritten())
                   << " bytes of new data";
        pthread_mutex_unlock(&params.nti.mu);
        return -1;
      }
      pthread_cond_wait(&params.nti.cv, &params.nti.mu);
    }

    pthread_mutex_unlock(&params.nti.mu);
  }

  params.written += tgt.blocks();

  return 0;
}
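
// For reference (all values hypothetical): "bsdiff 0 35 <srchash> <tgthash> 2,367,368 1
// 2,367,368" applies the 35-byte patch at offset 0 of the patch stream to one source block at
// [367,368) and writes the result over target blocks [367,368); "imgdiff" lines share the same
// layout but use the imgdiff patch format.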

static int PerformCommandDiff(CommandParameters& params) {
  // <offset> <length>
  if (params.cpos + 1 >= params.tokens.size()) {
    LOG(ERROR) << "missing patch offset or length for " << params.cmdname;
    return -1;
  }

  size_t offset;
  if (!android::base::ParseUint(params.tokens[params.cpos++], &offset)) {
    LOG(ERROR) << "invalid patch offset";
    return -1;
  }

  size_t len;
  if (!android::base::ParseUint(params.tokens[params.cpos++], &len)) {
    LOG(ERROR) << "invalid patch len";
    return -1;
  }

  RangeSet tgt;
  size_t blocks = 0;
  int status = LoadSrcTgtVersion3(params, &tgt, &blocks, false);

  if (status == -1) {
    LOG(ERROR) << "failed to read blocks for diff";
    return -1;
  }

  if (status == 0) {
    params.foundwrites = true;
  } else {
    params.target_verified = true;
    if (params.foundwrites) {
      LOG(WARNING) << "warning: commands executed out of order [" << params.cmdname << "]";
    }
  }

  if (params.canwrite) {
    if (status == 0) {
      LOG(INFO) << "patching " << blocks << " blocks to " << tgt.blocks();
      Value patch_value(
          Value::Type::BLOB,
          std::string(reinterpret_cast<const char*>(params.patch_start + offset), len));

      RangeSinkWriter writer(params.fd, tgt);
      if (params.cmdname[0] == 'i') {  // imgdiff
        if (ApplyImagePatch(params.buffer.data(), blocks * BLOCKSIZE, patch_value,
                            std::bind(&RangeSinkWriter::Write, &writer, std::placeholders::_1,
                                      std::placeholders::_2),
                            nullptr) != 0) {
          LOG(ERROR) << "Failed to apply image patch.";
          failure_type = kPatchApplicationFailure;
          return -1;
        }
      } else {
        if (ApplyBSDiffPatch(params.buffer.data(), blocks * BLOCKSIZE, patch_value, 0,
                             std::bind(&RangeSinkWriter::Write, &writer, std::placeholders::_1,
                                       std::placeholders::_2)) != 0) {
          LOG(ERROR) << "Failed to apply bsdiff patch.";
          failure_type = kPatchApplicationFailure;
          return -1;
        }
      }

      // We expect the output of the patcher to fill the tgt ranges exactly.
      if (!writer.Finished()) {
        LOG(ERROR) << "Failed to fully write target blocks (range sink underrun): Missing "
                   << writer.AvailableSpace() << " bytes";
        failure_type = kPatchApplicationFailure;
        return -1;
      }
    } else {
      LOG(INFO) << "skipping " << blocks << " blocks already patched to " << tgt.blocks() << " ["
                << params.cmdline << "]";
    }
  }

  if (!params.freestash.empty()) {
    FreeStash(params.stashbase, params.freestash);
    params.freestash.clear();
  }

  params.written += tgt.blocks();

  return 0;
}
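
// For reference (range hypothetical): "erase 2,0,524288" discards blocks [0,524288) via
// BLKDISCARD. Unlike "zero" it only runs against a real block device, and it writes nothing
// unless DEBUG_ERASE turns it into a zero command.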

static int PerformCommandErase(CommandParameters& params) {
  if (DEBUG_ERASE) {
    return PerformCommandZero(params);
  }

  struct stat sb;
  if (fstat(params.fd, &sb) == -1) {
    PLOG(ERROR) << "failed to fstat device to erase";
    return -1;
  }

  if (!S_ISBLK(sb.st_mode)) {
    LOG(ERROR) << "not a block device; skipping erase";
    return -1;
  }

  if (params.cpos >= params.tokens.size()) {
    LOG(ERROR) << "missing target blocks for erase";
    return -1;
  }

  RangeSet tgt = RangeSet::Parse(params.tokens[params.cpos++]);
  CHECK(static_cast<bool>(tgt));

  if (params.canwrite) {
    LOG(INFO) << " erasing " << tgt.blocks() << " blocks";

    for (const auto& [begin, end] : tgt) {
      off64_t offset = static_cast<off64_t>(begin) * BLOCKSIZE;
      size_t size = (end - begin) * BLOCKSIZE;
      if (!discard_blocks(params.fd, offset, size, true /* force */)) {
        return -1;
      }
    }
  }

  return 0;
}

static int PerformCommandAbort(CommandParameters&) {
  LOG(INFO) << "Aborting as instructed";
  return -1;
}

// Computes the hash_tree bytes based on the parameters, checks if the root hash of the tree
// matches the expected hash and writes the result to the specified range on the block_device.
// Hash_tree computation arguments:
//   hash_tree_ranges
//   source_ranges
//   hash_algorithm
//   salt_hex
//   root_hash
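//
// For reference (values hypothetical): a transfer-list line such as
//   compute_hash_tree 2,520,528 2,0,520 sha1 <salt_hex> <root_hash>
// hashes source blocks [0,520) with the given salt and writes the resulting tree over blocks
// [520,528), provided the computed root hash matches <root_hash>.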
static int PerformCommandComputeHashTree(CommandParameters& params) {
  if (params.cpos + 5 != params.tokens.size()) {
    LOG(ERROR) << "Invalid arguments count in hash computation " << params.cmdline;
    return -1;
  }

  // Expects the hash_tree data to be contiguous.
  RangeSet hash_tree_ranges = RangeSet::Parse(params.tokens[params.cpos++]);
  if (!hash_tree_ranges || hash_tree_ranges.size() != 1) {
    LOG(ERROR) << "Invalid hash tree ranges in " << params.cmdline;
    return -1;
  }

  RangeSet source_ranges = RangeSet::Parse(params.tokens[params.cpos++]);
  if (!source_ranges) {
    LOG(ERROR) << "Invalid source ranges in " << params.cmdline;
    return -1;
  }

  auto hash_function = HashTreeBuilder::HashFunction(params.tokens[params.cpos++]);
  if (hash_function == nullptr) {
    LOG(ERROR) << "Invalid hash algorithm in " << params.cmdline;
    return -1;
  }

  std::vector<unsigned char> salt;
  std::string salt_hex = params.tokens[params.cpos++];
  if (salt_hex.empty() || !HashTreeBuilder::ParseBytesArrayFromString(salt_hex, &salt)) {
    LOG(ERROR) << "Failed to parse salt in " << params.cmdline;
    return -1;
  }

  std::string expected_root_hash = params.tokens[params.cpos++];
  if (expected_root_hash.empty()) {
    LOG(ERROR) << "Invalid root hash in " << params.cmdline;
    return -1;
  }

  // Starts the hash_tree computation.
  HashTreeBuilder builder(BLOCKSIZE, hash_function);
  if (!builder.Initialize(static_cast<int64_t>(source_ranges.blocks()) * BLOCKSIZE, salt)) {
    LOG(ERROR) << "Failed to initialize hash tree computation, source " << source_ranges.ToString()
               << ", salt " << salt_hex;
    return -1;
  }

  // Iterates through every block in the source_ranges and updates the hash tree structure
  // accordingly.
  for (const auto& [begin, end] : source_ranges) {
    uint8_t buffer[BLOCKSIZE];
    if (!check_lseek(params.fd, static_cast<off64_t>(begin) * BLOCKSIZE, SEEK_SET)) {
      PLOG(ERROR) << "Failed to seek to block: " << begin;
      return -1;
    }

    for (size_t i = begin; i < end; i++) {
      if (!android::base::ReadFully(params.fd, buffer, BLOCKSIZE)) {
        failure_type = errno == EIO ? kEioFailure : kFreadFailure;
        LOG(ERROR) << "Failed to read data in " << begin << ":" << end;
        return -1;
      }

      if (!builder.Update(reinterpret_cast<unsigned char*>(buffer), BLOCKSIZE)) {
        LOG(ERROR) << "Failed to update hash tree builder";
        return -1;
      }
    }
  }

  if (!builder.BuildHashTree()) {
    LOG(ERROR) << "Failed to build hash tree";
    return -1;
  }

  std::string root_hash_hex = HashTreeBuilder::BytesArrayToString(builder.root_hash());
  if (root_hash_hex != expected_root_hash) {
    LOG(ERROR) << "Root hash of the verity hash tree doesn't match the expected value. Expected: "
               << expected_root_hash << ", actual: " << root_hash_hex;
    return -1;
  }

  uint64_t write_offset = static_cast<uint64_t>(hash_tree_ranges.GetBlockNumber(0)) * BLOCKSIZE;
  if (params.canwrite && !builder.WriteHashTreeToFd(params.fd, write_offset)) {
    LOG(ERROR) << "Failed to write hash tree to output";
    return -1;
  }

  // TODO(xunchang) validate the written bytes.

  return 0;
}

using CommandFunction = std::function<int(CommandParameters&)>;

using CommandMap = std::unordered_map<Command::Type, CommandFunction>;
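
// Sketch of how a CommandMap is expected to be populated (hypothetical; the real maps are built
// by the updater entry points that call PerformBlockImageUpdate):
//
//   CommandMap commands{
//     { Command::Type::MOVE, PerformCommandMove },
//     { Command::Type::STASH, PerformCommandStash },
//     { Command::Type::ERASE, nullptr },  // nullptr => skipped, e.g. during verification
//   };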

static bool Sha1DevicePath(const std::string& path, uint8_t digest[SHA_DIGEST_LENGTH]) {
  auto device_name = android::base::Basename(path);
  auto dm_target_name_path = "/sys/block/" + device_name + "/dm/name";

  struct stat sb;
  if (stat(dm_target_name_path.c_str(), &sb) == 0) {
    // This is a device mapper target. Use partition name as part of the hash instead. Do not
    // include extents as part of the hash, because the size of a partition may be shrunk after
    // the patches are applied.
    std::string dm_target_name;
    if (!android::base::ReadFileToString(dm_target_name_path, &dm_target_name)) {
      PLOG(ERROR) << "Cannot read " << dm_target_name_path;
      return false;
    }
    SHA1(reinterpret_cast<const uint8_t*>(dm_target_name.data()), dm_target_name.size(), digest);
    return true;
  }

  if (errno != ENOENT) {
    // This is a device mapper target, but its name cannot be retrieved.
    PLOG(ERROR) << "Cannot get dm target name for " << path;
    return false;
  }

  // This doesn't appear to be a device mapper target, but if its name starts with dm-, something
  // else might have gone wrong.
  if (android::base::StartsWith(device_name, "dm-")) {
    LOG(WARNING) << "Device " << path << " starts with dm- but is not mapped by device-mapper.";
  }

  // Stash directory should be different for each partition to avoid conflicts when updating
  // multiple partitions at the same time, so we use the hash of the block device name as the base
  // directory.
  SHA1(reinterpret_cast<const uint8_t*>(path.data()), path.size(), digest);
  return true;
}
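
// For reference (path hypothetical): for /dev/block/bootdevice/by-name/system the stash base
// becomes the 40-character hex SHA-1 of that path string, giving each partition its own stash
// directory and keeping concurrent partition updates from colliding.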

static Value* PerformBlockImageUpdate(const char* name, State* state,
                                      const std::vector<std::unique_ptr<Expr>>& argv,
                                      const CommandMap& command_map, bool dryrun) {
  CommandParameters params{};
  stash_map.clear();
  params.canwrite = !dryrun;

  LOG(INFO) << "performing " << (dryrun ? "verification" : "update");
  if (state->is_retry) {
    is_retry = true;
    LOG(INFO) << "This update is a retry.";
  }
  if (argv.size() != 4) {
    ErrorAbort(state, kArgsParsingFailure, "block_image_update expects 4 arguments, got %zu",
               argv.size());
    return StringValue("");
  }

  std::vector<std::unique_ptr<Value>> args;
  if (!ReadValueArgs(state, argv, &args)) {
    return nullptr;
  }

  // args:
  //   - block device (or file) to modify in-place
  //   - transfer list (blob)
  //   - new data stream (filename within package.zip)
  //   - patch stream (filename within package.zip, must be uncompressed)
  const std::unique_ptr<Value>& blockdev_filename = args[0];
  const std::unique_ptr<Value>& transfer_list_value = args[1];
  const std::unique_ptr<Value>& new_data_fn = args[2];
  const std::unique_ptr<Value>& patch_data_fn = args[3];

  if (blockdev_filename->type != Value::Type::STRING) {
    ErrorAbort(state, kArgsParsingFailure, "blockdev_filename argument to %s must be string", name);
    return StringValue("");
  }
  if (transfer_list_value->type != Value::Type::BLOB) {
    ErrorAbort(state, kArgsParsingFailure, "transfer_list argument to %s must be blob", name);
    return StringValue("");
  }
  if (new_data_fn->type != Value::Type::STRING) {
    ErrorAbort(state, kArgsParsingFailure, "new_data_fn argument to %s must be string", name);
    return StringValue("");
  }
  if (patch_data_fn->type != Value::Type::STRING) {
    ErrorAbort(state, kArgsParsingFailure, "patch_data_fn argument to %s must be string", name);
    return StringValue("");
  }

  auto updater = state->updater;
  auto block_device_path = updater->FindBlockDeviceName(blockdev_filename->data);
  if (block_device_path.empty()) {
    LOG(ERROR) << "Block device path for " << blockdev_filename->data << " not found. " << name
               << " failed.";
    return StringValue("");
  }

  ZipArchiveHandle za = updater->GetPackageHandle();
  if (za == nullptr) {
    return StringValue("");
  }

  std::string_view path_data(patch_data_fn->data);
  ZipEntry64 patch_entry;
  if (FindEntry(za, path_data, &patch_entry) != 0) {
    LOG(ERROR) << name << "(): no file \"" << patch_data_fn->data << "\" in package";
    return StringValue("");
  }
  params.patch_start = updater->GetMappedPackageAddress() + patch_entry.offset;

  std::string_view new_data(new_data_fn->data);
  ZipEntry64 new_entry;
  if (FindEntry(za, new_data, &new_entry) != 0) {
    LOG(ERROR) << name << "(): no file \"" << new_data_fn->data << "\" in package";
    return StringValue("");
  }

  params.fd.reset(TEMP_FAILURE_RETRY(open(block_device_path.c_str(), O_RDWR)));
  if (params.fd == -1) {
    failure_type = errno == EIO ? kEioFailure : kFileOpenFailure;
    PLOG(ERROR) << "open \"" << block_device_path << "\" failed";
    return StringValue("");
  }

  uint8_t digest[SHA_DIGEST_LENGTH];
  if (!Sha1DevicePath(block_device_path, digest)) {
    return StringValue("");
  }
  params.stashbase = print_sha1(digest);

  // Possibly return early on retry by checking the marker. If the update on this partition had
  // already finished (but the install was interrupted at a later point), there could be leftovers
  // on /cache that would fail the otherwise no-op retry.
  std::string updated_marker = GetStashFileName(params.stashbase + ".UPDATED", "", "");
  if (is_retry) {
    struct stat sb;
    int result = stat(updated_marker.c_str(), &sb);
    if (result == 0) {
      LOG(INFO) << "Skipping already updated partition " << block_device_path << " based on marker";
      return StringValue("t");
    }
  } else {
    // Delete the obsolete marker if any.
    std::string err;
    if (!android::base::RemoveFileIfExists(updated_marker, &err)) {
      LOG(ERROR) << "Failed to remove partition updated marker " << updated_marker << ": " << err;
      return StringValue("");
    }
  }

  static constexpr size_t kTransferListHeaderLines = 4;
  std::vector<std::string> lines = android::base::Split(transfer_list_value->data, "\n");
  if (lines.size() < kTransferListHeaderLines) {
    ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zu]",
               lines.size());
    return StringValue("");
  }

  // First line in transfer list is the version number.
  if (!android::base::ParseInt(lines[0], &params.version, 3, 4)) {
    LOG(ERROR) << "unexpected transfer list version [" << lines[0] << "]";
    return StringValue("");
  }

  LOG(INFO) << "blockimg version is " << params.version;

  // Second line in transfer list is the total number of blocks we expect to write.
  size_t total_blocks;
  if (!android::base::ParseUint(lines[1], &total_blocks)) {
    ErrorAbort(state, kArgsParsingFailure, "unexpected block count [%s]", lines[1].c_str());
    return StringValue("");
  }

  if (total_blocks == 0) {
    return StringValue("t");
  }

  // Third line is how many stash entries are needed simultaneously.
  LOG(INFO) << "maximum stash entries " << lines[2];

  // Fourth line is the maximum number of blocks that will be stashed simultaneously.
  size_t stash_max_blocks;
  if (!android::base::ParseUint(lines[3], &stash_max_blocks)) {
    ErrorAbort(state, kArgsParsingFailure, "unexpected maximum stash blocks [%s]",
               lines[3].c_str());
    return StringValue("");
  }
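
  // For reference, a (hypothetical) v4 transfer list might begin with:
  //   4         <- transfer list version
  //   393472    <- total blocks written by this list
  //   53        <- maximum simultaneous stash entries
  //   1280      <- maximum blocks stashed simultaneously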

  int res = CreateStash(state, stash_max_blocks, params.stashbase);
  if (res == -1) {
    return StringValue("");
  }
  params.createdstash = res;

  // Set up the new data writer.
  if (params.canwrite) {
    params.nti.za = za;
    params.nti.entry = new_entry;
    params.nti.brotli_compressed = android::base::EndsWith(new_data_fn->data, ".br");
    if (params.nti.brotli_compressed) {
      // Initialize brotli decoder state.
      params.nti.brotli_decoder_state = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr);
    }
    params.nti.receiver_available = true;

    pthread_mutex_init(&params.nti.mu, nullptr);
    pthread_cond_init(&params.nti.cv, nullptr);
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);

    int error = pthread_create(&params.thread, &attr, unzip_new_data, &params.nti);
    if (error != 0) {
      LOG(ERROR) << "pthread_create failed: " << strerror(error);
      return StringValue("");
    }
  }

  // When performing an update, save the index and cmdline of the current command into the
  // last_command_file.
  // Upon resuming an update, read the saved index first; then
  //   1. In verification mode, check if the 'move' or 'diff' commands before the saved index have
  //      already produced the expected target blocks. If not, these commands cannot be skipped and
  //      we need to attempt to execute them again. Therefore, we will delete the last_command_file
  //      so that the update will resume from the start of the transfer list.
  //   2. In update mode, skip all commands before the saved index. Therefore, we can avoid deleting
  //      stashes with duplicate id unintentionally (b/69858743); and also speed up the update.
  // If an update succeeds or is unresumable, delete the last_command_file.
  bool skip_executed_command = true;
  size_t saved_last_command_index;
  if (!ParseLastCommandFile(&saved_last_command_index)) {
    DeleteLastCommandFile();
    // We failed to parse the last command. Disallow skipping executed commands.
    skip_executed_command = false;
  }

  int rc = -1;

  // Subsequent lines are all individual transfer commands.
  for (size_t i = kTransferListHeaderLines; i < lines.size(); i++) {
    const std::string& line = lines[i];
    if (line.empty()) continue;

    size_t cmdindex = i - kTransferListHeaderLines;
    params.tokens = android::base::Split(line, " ");
    params.cpos = 0;
    params.cmdname = params.tokens[params.cpos++];
    params.cmdline = line;
    params.target_verified = false;

    Command::Type cmd_type = Command::ParseType(params.cmdname);
    if (cmd_type == Command::Type::LAST) {
      LOG(ERROR) << "unexpected command [" << params.cmdname << "]";
      goto pbiudone;
    }

    const CommandFunction& performer = command_map.at(cmd_type);

    // Skip the command if we explicitly set the corresponding function pointer to nullptr, e.g.
    // "erase" during block_image_verify.
    if (performer == nullptr) {
      LOG(DEBUG) << "skip executing command [" << line << "]";
      continue;
    }

    // Skip all commands before the saved last command index when resuming an update, except for
    // "new" commands, because they consume the new data stream sequentially.
    if (params.canwrite && skip_executed_command && cmdindex <= saved_last_command_index &&
        cmd_type != Command::Type::NEW) {
      LOG(INFO) << "Skipping already executed command: " << cmdindex
                << ", last executed command for previous update: " << saved_last_command_index;
      continue;
    }

    if (performer(params) == -1) {
      LOG(ERROR) << "failed to execute command [" << line << "]";
      if (cmd_type == Command::Type::COMPUTE_HASH_TREE && failure_type == kNoCause) {
        failure_type = kHashTreeComputationFailure;
      }
      goto pbiudone;
    }

    // In verify mode, check if the commands before the saved last_command_index have been executed
    // correctly. If some target blocks have unexpected contents, delete the last command file so
    // that we will resume the update from the first command in the transfer list.
    if (!params.canwrite && skip_executed_command && cmdindex <= saved_last_command_index) {
      // TODO(xunchang) check that the cmdline of the saved index is correct.
      if ((cmd_type == Command::Type::MOVE || cmd_type == Command::Type::BSDIFF ||
           cmd_type == Command::Type::IMGDIFF) &&
          !params.target_verified) {
        LOG(WARNING) << "Previously executed command " << saved_last_command_index << ": "
                     << params.cmdline << " doesn't produce expected target blocks.";
        skip_executed_command = false;
        DeleteLastCommandFile();
      }
    }

    if (params.canwrite) {
      if (fsync(params.fd) == -1) {
        failure_type = errno == EIO ? kEioFailure : kFsyncFailure;
        PLOG(ERROR) << "fsync failed";
        goto pbiudone;
      }

      if (!UpdateLastCommandIndex(cmdindex, params.cmdline)) {
        LOG(WARNING) << "Failed to update the last command file.";
      }

      updater->WriteToCommandPipe(
          android::base::StringPrintf("set_progress %.4f",
                                      static_cast<double>(params.written) / total_blocks),
          true);
    }
  }
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2017-10-18 22:15:21 +02:00
|
|
|
rc = 0;
|
|
|
|
|
|
|
|
pbiudone:
|
2017-03-13 22:57:34 +01:00
|
|
|
if (params.canwrite) {
|
2017-10-18 22:15:21 +02:00
|
|
|
pthread_mutex_lock(¶ms.nti.mu);
|
|
|
|
if (params.nti.receiver_available) {
|
|
|
|
LOG(WARNING) << "new data receiver is still available after executing all commands.";
|
|
|
|
}
|
|
|
|
params.nti.receiver_available = false;
|
|
|
|
pthread_cond_broadcast(¶ms.nti.cv);
|
|
|
|
pthread_mutex_unlock(¶ms.nti.mu);
|
|
|
|
int ret = pthread_join(params.thread, nullptr);
|
|
|
|
if (ret != 0) {
|
|
|
|
LOG(WARNING) << "pthread join returned with " << strerror(ret);
|
|
|
|
}
|
2017-03-13 22:57:34 +01:00
|
|
|
|
2017-10-18 22:15:21 +02:00
|
|
|
if (rc == 0) {
|
|
|
|
LOG(INFO) << "wrote " << params.written << " blocks; expected " << total_blocks;
|
|
|
|
LOG(INFO) << "stashed " << params.stashed << " blocks";
|
|
|
|
LOG(INFO) << "max alloc needed was " << params.buffer.size();
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2019-05-14 19:54:43 +02:00
|
|
|
const char* partition = strrchr(block_device_path.c_str(), '/');
|
2017-10-18 22:15:21 +02:00
|
|
|
if (partition != nullptr && *(partition + 1) != 0) {
|
2019-05-03 10:05:04 +02:00
|
|
|
updater->WriteToCommandPipe(
|
|
|
|
android::base::StringPrintf("log bytes_written_%s: %" PRIu64, partition + 1,
|
|
|
|
static_cast<uint64_t>(params.written) * BLOCKSIZE));
|
|
|
|
updater->WriteToCommandPipe(
|
|
|
|
android::base::StringPrintf("log bytes_stashed_%s: %" PRIu64, partition + 1,
|
|
|
|
static_cast<uint64_t>(params.stashed) * BLOCKSIZE),
|
|
|
|
true);
|
2017-10-18 22:15:21 +02:00
|
|
|
}
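// For example (path hypothetical): if block_device_path were
// "/dev/block/by-name/system", the lines emitted would be
// "log bytes_written_system: <n>" and "log bytes_stashed_system: <n>",
// with <n> in bytes (blocks written/stashed times BLOCKSIZE).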
|
|
|
|
// Delete the stash only after successfully completing the update, as it may contain blocks
|
|
|
|
// needed to resume the update later.
|
|
|
|
DeleteStash(params.stashbase);
|
2017-12-05 20:04:17 +01:00
|
|
|
DeleteLastCommandFile();
|
updater: Skip an updated partition on retry.
Prior to the change, the BBOTA updater would try to re-run all the
commands for a given partition on retry, including creating stashes
according to the list of commands. This could fail a retry when the
previous update had moved on to the next stage, with leftovers in /cache.
This CL creates a marker on /cache upon successfully updating a
partition. The update commands will be skipped when trying to apply
updates on an updated partition. Note that the marker is expected to be
removed while doing a normal boot (in particular, handled by
RecoverySystem#handleAftermath). If that didn't happen, the updater
would also remove the marker before starting the next fresh update.
Alternatively, we can achieve the same goal by changing the OTA script,
which needs to additionally compare the checksum against the target
build. For example,
range_sha1("/system", "ranges") == SHA1_of_updated_system ||
block_image_update("/system");
The downside is that we need to pay that cost on each install, as the
edify script doesn't support caching the result in a variable.
Bug: 79165963
Test: Simulate the process on device (by triggering a reboot while
updating /vendor). Check the update log and result.
Change-Id: I731031fa336133e1221b33edfc469969706e8091
2018-05-07 20:38:25 +02:00
|
|
|
|
|
|
|
// Create a marker on the /cache partition, which allows skipping the update on this partition on
|
|
|
|
// retry. The marker will be removed upon a normal boot, or before starting the next
|
|
|
|
// fresh install.
|
2018-12-20 01:21:55 +01:00
|
|
|
if (!SetUpdatedMarker(updated_marker)) {
|
2018-05-07 20:38:25 +02:00
|
|
|
LOG(WARNING) << "Failed to set updated marker; continuing";
|
|
|
|
}
|
2016-04-30 20:49:59 +02:00
|
|
|
}
|
2017-10-18 22:15:21 +02:00
|
|
|
|
|
|
|
pthread_mutex_destroy(¶ms.nti.mu);
|
|
|
|
pthread_cond_destroy(¶ms.nti.cv);
|
|
|
|
} else if (rc == 0) {
|
2017-03-13 22:57:34 +01:00
|
|
|
LOG(INFO) << "verified partition contents; update may be resumed";
|
|
|
|
}
|
2016-04-30 20:49:59 +02:00
|
|
|
|
2018-08-27 19:50:31 +02:00
|
|
|
if (fsync(params.fd) == -1) {
|
|
|
|
failure_type = errno == EIO ? kEioFailure : kFsyncFailure;
|
2017-03-13 22:57:34 +01:00
|
|
|
PLOG(ERROR) << "fsync failed";
|
|
|
|
}
|
|
|
|
// params.fd will be automatically closed because it's a unique_fd.
|
|
|
|
|
2017-06-30 02:04:21 +02:00
|
|
|
if (params.nti.brotli_decoder_state != nullptr) {
|
|
|
|
BrotliDecoderDestroyInstance(params.nti.brotli_decoder_state);
|
|
|
|
}
|
|
|
|
|
2017-12-05 20:04:17 +01:00
|
|
|
// Delete the last command file if the update cannot be resumed.
|
|
|
|
if (params.isunresumable) {
|
|
|
|
DeleteLastCommandFile();
|
|
|
|
}
|
|
|
|
|
2017-03-13 22:57:34 +01:00
|
|
|
// Only delete the stash if the update cannot be resumed, or it's a verification run and we
|
|
|
|
// created the stash.
|
|
|
|
if (params.isunresumable || (!params.canwrite && params.createdstash)) {
|
|
|
|
DeleteStash(params.stashbase);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (failure_type != kNoCause && state->cause_code == kNoCause) {
|
|
|
|
state->cause_code = failure_type;
|
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2017-03-13 22:57:34 +01:00
|
|
|
return StringValue(rc == 0 ? "t" : "");
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* The transfer list is a text file containing commands to transfer data from one place to another
|
|
|
|
* on the target partition. We parse it and execute the commands in order:
|
|
|
|
*
|
|
|
|
* zero [rangeset]
|
|
|
|
* - Fill the indicated blocks with zeros.
|
|
|
|
*
|
|
|
|
* new [rangeset]
|
|
|
|
* - Fill the blocks with data read from the new_data file.
|
|
|
|
*
|
|
|
|
* erase [rangeset]
|
|
|
|
* - Mark the given blocks as empty.
|
|
|
|
*
|
|
|
|
* move <...>
|
|
|
|
* bsdiff <patchstart> <patchlen> <...>
|
|
|
|
* imgdiff <patchstart> <patchlen> <...>
|
|
|
|
* - Read the source blocks, apply a patch (or not in the case of move), write result to target
|
|
|
|
* blocks. bsdiff or imgdiff specifies the type of patch; move means no patch at all.
|
|
|
|
*
|
|
|
|
* See the comments in LoadSrcTgtVersion3() for a description of the <...> format.
|
|
|
|
*
|
|
|
|
* stash <stash_id> <src_range>
|
|
|
|
* - Load the given source range and stash the data in the given slot of the stash table.
|
|
|
|
*
|
|
|
|
* free <stash_id>
|
|
|
|
* - Free the given stash data.
|
|
|
|
*
|
|
|
|
* The creator of the transfer list will guarantee that no block is read (i.e., used as the source for
|
|
|
|
* a patch or move) after it has been written.
|
|
|
|
*
|
|
|
|
* The creator will guarantee that a given stash is loaded (with a stash command) before it's used
|
|
|
|
* in a move/bsdiff/imgdiff command.
|
|
|
|
*
|
|
|
|
* Within one command the source and target ranges may overlap, so in general we need to read the
|
|
|
|
* entire source into memory before writing anything to the target blocks.
|
|
|
|
*
|
|
|
|
* All the patch data is concatenated into one patch_data file in the update package. It must be
|
|
|
|
* stored uncompressed because we memory-map it directly from the archive. (Since patches are
|
|
|
|
* already compressed, we lose very little by not compressing their concatenation.)
|
|
|
|
*
|
|
|
|
* Commands that read data from the partition (i.e. move/bsdiff/imgdiff/stash) have one or more
|
|
|
|
* additional hashes before the range parameters, which are used to check if the command has already
|
|
|
|
* been completed and verify the integrity of the source data.
|
|
|
|
*/
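// As an illustration only (counts and the stash token are made up), a small
// transfer list could look like this, assuming the four header lines used by
// v2+ transfer lists (version, total block count, maximum number of stash
// entries, maximum number of stashed blocks) followed by one command per line:
//
//   4
//   1024
//   1
//   256
//   stash 9e8b37a6 2,768,1024
//   new 2,0,256
//   zero 2,256,512
//   free 9e8b37a6
//   erase 2,512,1024
//
// A rangeset such as "2,0,256" is encoded as the count of integers that
// follow, then half-open [begin, end) block pairs; "2,0,256" therefore
// denotes blocks 0 through 255.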
|
2017-03-06 23:44:59 +01:00
|
|
|
Value* BlockImageVerifyFn(const char* name, State* state,
|
|
|
|
const std::vector<std::unique_ptr<Expr>>& argv) {
|
2018-05-22 01:05:56 +02:00
|
|
|
// Commands which are not allowed are set to nullptr to skip them completely.
|
|
|
|
const CommandMap command_map{
|
|
|
|
// clang-format off
|
2018-08-02 01:40:00 +02:00
|
|
|
{ Command::Type::ABORT, PerformCommandAbort },
|
|
|
|
{ Command::Type::BSDIFF, PerformCommandDiff },
|
2019-05-09 19:58:10 +02:00
|
|
|
{ Command::Type::COMPUTE_HASH_TREE, nullptr },
|
2018-08-02 01:40:00 +02:00
|
|
|
{ Command::Type::ERASE, nullptr },
|
|
|
|
{ Command::Type::FREE, PerformCommandFree },
|
|
|
|
{ Command::Type::IMGDIFF, PerformCommandDiff },
|
|
|
|
{ Command::Type::MOVE, PerformCommandMove },
|
|
|
|
{ Command::Type::NEW, nullptr },
|
|
|
|
{ Command::Type::STASH, PerformCommandStash },
|
|
|
|
{ Command::Type::ZERO, nullptr },
|
2018-05-22 01:05:56 +02:00
|
|
|
// clang-format on
|
|
|
|
};
|
|
|
|
CHECK_EQ(static_cast<size_t>(Command::Type::LAST), command_map.size());
|
|
|
|
|
|
|
|
// Perform a dry run without writing to test if an update can proceed.
|
|
|
|
return PerformBlockImageUpdate(name, state, argv, command_map, true);
|
2014-12-09 17:39:47 +01:00
|
|
|
}
|
|
|
|
|
2017-03-06 23:44:59 +01:00
|
|
|
Value* BlockImageUpdateFn(const char* name, State* state,
|
|
|
|
const std::vector<std::unique_ptr<Expr>>& argv) {
|
2018-05-22 01:05:56 +02:00
|
|
|
const CommandMap command_map{
|
|
|
|
// clang-format off
|
2018-08-02 01:40:00 +02:00
|
|
|
{ Command::Type::ABORT, PerformCommandAbort },
|
|
|
|
{ Command::Type::BSDIFF, PerformCommandDiff },
|
|
|
|
{ Command::Type::COMPUTE_HASH_TREE, PerformCommandComputeHashTree },
|
|
|
|
{ Command::Type::ERASE, PerformCommandErase },
|
|
|
|
{ Command::Type::FREE, PerformCommandFree },
|
|
|
|
{ Command::Type::IMGDIFF, PerformCommandDiff },
|
|
|
|
{ Command::Type::MOVE, PerformCommandMove },
|
|
|
|
{ Command::Type::NEW, PerformCommandNew },
|
|
|
|
{ Command::Type::STASH, PerformCommandStash },
|
|
|
|
{ Command::Type::ZERO, PerformCommandZero },
|
2018-05-22 01:05:56 +02:00
|
|
|
// clang-format on
|
|
|
|
};
|
|
|
|
CHECK_EQ(static_cast<size_t>(Command::Type::LAST), command_map.size());
|
|
|
|
|
|
|
|
return PerformBlockImageUpdate(name, state, argv, command_map, false);
|
2014-12-09 17:39:47 +01:00
|
|
|
}
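// Hypothetical edify usage (argument values are illustrative; the first
// argument is resolved via FindBlockDeviceName(), so a partition name is
// shown here):
//
//   block_image_verify("system", package_extract_file("system.transfer.list"),
//                      "system.new.dat.br", "system.patch.dat") ||
//       abort("system partition has unexpected contents");
//
//   block_image_update("system", package_extract_file("system.transfer.list"),
//                      "system.new.dat.br", "system.patch.dat") ||
//       abort("failed to update system partition");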
|
|
|
|
|
2017-03-06 23:44:59 +01:00
|
|
|
Value* RangeSha1Fn(const char* name, State* state, const std::vector<std::unique_ptr<Expr>>& argv) {
|
2017-03-31 10:18:13 +02:00
|
|
|
if (argv.size() != 2) {
|
|
|
|
ErrorAbort(state, kArgsParsingFailure, "range_sha1 expects 2 arguments, got %zu", argv.size());
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2017-03-06 23:44:59 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
std::vector<std::unique_ptr<Value>> args;
|
|
|
|
if (!ReadValueArgs(state, argv, &args)) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
2016-10-18 03:15:20 +02:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
const std::unique_ptr<Value>& blockdev_filename = args[0];
|
|
|
|
const std::unique_ptr<Value>& ranges = args[1];
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2018-06-20 00:56:49 +02:00
|
|
|
if (blockdev_filename->type != Value::Type::STRING) {
|
2017-03-31 10:18:13 +02:00
|
|
|
ErrorAbort(state, kArgsParsingFailure, "blockdev_filename argument to %s must be string", name);
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2018-06-20 00:56:49 +02:00
|
|
|
if (ranges->type != Value::Type::STRING) {
|
2017-03-31 10:18:13 +02:00
|
|
|
ErrorAbort(state, kArgsParsingFailure, "ranges argument to %s must be string", name);
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2019-05-14 19:54:43 +02:00
|
|
|
auto block_device_path = state->updater->FindBlockDeviceName(blockdev_filename->data);
|
|
|
|
if (block_device_path.empty()) {
|
|
|
|
LOG(ERROR) << "Block device path for " << blockdev_filename->data << " not found. " << name
|
|
|
|
<< " failed.";
|
|
|
|
return StringValue("");
|
|
|
|
}
|
|
|
|
|
|
|
|
android::base::unique_fd fd(open(block_device_path.c_str(), O_RDWR));
|
2017-03-31 10:18:13 +02:00
|
|
|
if (fd == -1) {
|
2018-08-27 19:50:31 +02:00
|
|
|
CauseCode cause_code = errno == EIO ? kEioFailure : kFileOpenFailure;
|
2019-05-14 19:54:43 +02:00
|
|
|
ErrorAbort(state, cause_code, "open \"%s\" failed: %s", block_device_path.c_str(),
|
2017-03-31 10:18:13 +02:00
|
|
|
strerror(errno));
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
RangeSet rs = RangeSet::Parse(ranges->data);
|
2017-11-04 08:08:08 +01:00
|
|
|
CHECK(static_cast<bool>(rs));
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
SHA_CTX ctx;
|
|
|
|
SHA1_Init(&ctx);
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
std::vector<uint8_t> buffer(BLOCKSIZE);
|
2018-08-28 19:09:13 +02:00
|
|
|
for (const auto& [begin, end] : rs) {
|
|
|
|
if (!check_lseek(fd, static_cast<off64_t>(begin) * BLOCKSIZE, SEEK_SET)) {
|
2019-05-14 19:54:43 +02:00
|
|
|
ErrorAbort(state, kLseekFailure, "failed to seek %s: %s", block_device_path.c_str(),
|
2017-03-31 10:18:13 +02:00
|
|
|
strerror(errno));
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2018-08-28 19:09:13 +02:00
|
|
|
for (size_t j = begin; j < end; ++j) {
|
2018-08-27 19:50:31 +02:00
|
|
|
if (!android::base::ReadFully(fd, buffer.data(), BLOCKSIZE)) {
|
|
|
|
CauseCode cause_code = errno == EIO ? kEioFailure : kFreadFailure;
|
2019-05-14 19:54:43 +02:00
|
|
|
ErrorAbort(state, cause_code, "failed to read %s: %s", block_device_path.c_str(),
|
2017-03-31 10:18:13 +02:00
|
|
|
strerror(errno));
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2014-12-09 17:39:47 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
SHA1_Update(&ctx, buffer.data(), BLOCKSIZE);
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|
2017-03-31 10:18:13 +02:00
|
|
|
}
|
|
|
|
uint8_t digest[SHA_DIGEST_LENGTH];
|
|
|
|
SHA1_Final(digest, &ctx);
|
2014-08-15 23:31:52 +02:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
return StringValue(print_sha1(digest));
|
2014-08-15 23:31:52 +02:00
|
|
|
}
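// Hypothetical edify usage, e.g. to check that a source partition still
// matches the expected contents (hash and ranges are illustrative):
//
//   range_sha1("system", "2,0,1024") == "5f3c..." ||
//       abort("system partition has been modified.");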
|
|
|
|
|
2015-12-15 20:47:30 +01:00
|
|
|
// This function checks whether a device has been remounted R/W prior to an incremental
|
|
|
|
// OTA update, which is a common cause of update failures. The function reads the
|
|
|
|
// first block of the given partition and checks its mount time/count. It returns the string
|
|
|
|
// "t" on success and an empty string otherwise.
|
|
|
|
|
2017-03-06 23:44:59 +01:00
|
|
|
Value* CheckFirstBlockFn(const char* name, State* state,
|
|
|
|
const std::vector<std::unique_ptr<Expr>>& argv) {
|
2017-03-31 10:18:13 +02:00
|
|
|
if (argv.size() != 1) {
|
|
|
|
ErrorAbort(state, kArgsParsingFailure, "check_first_block expects 1 argument, got %zu",
|
|
|
|
argv.size());
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2017-03-06 23:44:59 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
std::vector<std::unique_ptr<Value>> args;
|
|
|
|
if (!ReadValueArgs(state, argv, &args)) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
const std::unique_ptr<Value>& arg_filename = args[0];
|
2016-10-18 03:15:20 +02:00
|
|
|
|
2018-06-20 00:56:49 +02:00
|
|
|
if (arg_filename->type != Value::Type::STRING) {
|
2017-03-31 10:18:13 +02:00
|
|
|
ErrorAbort(state, kArgsParsingFailure, "filename argument to %s must be string", name);
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2019-05-14 19:54:43 +02:00
|
|
|
auto block_device_path = state->updater->FindBlockDeviceName(arg_filename->data);
|
|
|
|
if (block_device_path.empty()) {
|
|
|
|
LOG(ERROR) << "Block device path for " << arg_filename->data << " not found. " << name
|
|
|
|
<< " failed.";
|
|
|
|
return StringValue("");
|
|
|
|
}
|
|
|
|
|
|
|
|
android::base::unique_fd fd(open(block_device_path.c_str(), O_RDONLY));
|
2017-03-31 10:18:13 +02:00
|
|
|
if (fd == -1) {
|
2018-08-27 19:50:31 +02:00
|
|
|
CauseCode cause_code = errno == EIO ? kEioFailure : kFileOpenFailure;
|
2019-05-14 19:54:43 +02:00
|
|
|
ErrorAbort(state, cause_code, "open \"%s\" failed: %s", block_device_path.c_str(),
|
2017-03-31 10:18:13 +02:00
|
|
|
strerror(errno));
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 01:57:29 +02:00
|
|
|
RangeSet blk0(std::vector<Range>{ Range{ 0, 1 } });
|
2017-03-31 10:18:13 +02:00
|
|
|
std::vector<uint8_t> block0_buffer(BLOCKSIZE);
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2018-05-31 01:14:14 +02:00
|
|
|
if (ReadBlocks(blk0, &block0_buffer, fd) == -1) {
|
2018-08-27 19:50:31 +02:00
|
|
|
CauseCode cause_code = errno == EIO ? kEioFailure : kFreadFailure;
|
2019-05-14 19:54:43 +02:00
|
|
|
ErrorAbort(state, cause_code, "failed to read %s: %s", block_device_path.c_str(),
|
2017-03-31 10:18:13 +02:00
|
|
|
strerror(errno));
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
// https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout
|
|
|
|
// The superblock starts at block 0, offset 0x400.
|
|
|
|
// 0x2C: len32 Mount time
|
|
|
|
// 0x30: len32 Write time
|
|
|
|
// 0x34: len16 Number of mounts since the last fsck
|
|
|
|
// 0x38: len16 Magic signature 0xEF53
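// Note: these on-disk fields are little-endian; the raw casts below assume a
// little-endian host, which holds for all supported Android targets.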
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
time_t mount_time = *reinterpret_cast<uint32_t*>(&block0_buffer[0x400 + 0x2C]);
|
|
|
|
uint16_t mount_count = *reinterpret_cast<uint16_t*>(&block0_buffer[0x400 + 0x34]);
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
if (mount_count > 0) {
|
2019-05-14 19:54:43 +02:00
|
|
|
state->updater->UiPrint(
|
2019-05-03 10:05:04 +02:00
|
|
|
android::base::StringPrintf("Device was remounted R/W %" PRIu16 " times", mount_count));
|
2019-05-14 19:54:43 +02:00
|
|
|
state->updater->UiPrint(
|
2019-05-03 10:05:04 +02:00
|
|
|
android::base::StringPrintf("Last remount happened on %s", ctime(&mount_time)));
|
2017-03-31 10:18:13 +02:00
|
|
|
}
|
2015-12-15 20:47:30 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
return StringValue("t");
|
2015-12-15 20:47:30 +01:00
|
|
|
}
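// Hypothetical edify usage, typically on a verification failure path to log
// extra diagnostics (the partition argument is illustrative):
//
//   check_first_block("system");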
|
|
|
|
|
2017-03-06 23:44:59 +01:00
|
|
|
Value* BlockImageRecoverFn(const char* name, State* state,
|
|
|
|
const std::vector<std::unique_ptr<Expr>>& argv) {
|
2017-03-31 10:18:13 +02:00
|
|
|
if (argv.size() != 2) {
|
|
|
|
ErrorAbort(state, kArgsParsingFailure, "block_image_recover expects 2 arguments, got %zu",
|
|
|
|
argv.size());
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2017-03-06 23:44:59 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
std::vector<std::unique_ptr<Value>> args;
|
|
|
|
if (!ReadValueArgs(state, argv, &args)) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
2015-06-25 11:25:36 +02:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
const std::unique_ptr<Value>& filename = args[0];
|
|
|
|
const std::unique_ptr<Value>& ranges = args[1];
|
2015-06-25 11:25:36 +02:00
|
|
|
|
2018-06-20 00:56:49 +02:00
|
|
|
if (filename->type != Value::Type::STRING) {
|
2017-03-31 10:18:13 +02:00
|
|
|
ErrorAbort(state, kArgsParsingFailure, "filename argument to %s must be string", name);
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2018-06-20 00:56:49 +02:00
|
|
|
if (ranges->type != Value::Type::STRING) {
|
2017-03-31 10:18:13 +02:00
|
|
|
ErrorAbort(state, kArgsParsingFailure, "ranges argument to %s must be string", name);
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2017-11-04 08:08:08 +01:00
|
|
|
RangeSet rs = RangeSet::Parse(ranges->data);
|
|
|
|
if (!rs) {
|
|
|
|
ErrorAbort(state, kArgsParsingFailure, "failed to parse ranges: %s", ranges->data.c_str());
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2015-06-25 11:25:36 +02:00
|
|
|
|
2019-05-14 19:54:43 +02:00
|
|
|
auto block_device_path = state->updater->FindBlockDeviceName(filename->data);
|
|
|
|
if (block_device_path.empty()) {
|
|
|
|
LOG(ERROR) << "Block device path for " << filename->data << " not found. " << name
|
|
|
|
<< " failed.";
|
|
|
|
return StringValue("");
|
|
|
|
}
|
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
// Log a notice when recovery is attempted.
|
2019-05-14 19:54:43 +02:00
|
|
|
LOG(INFO) << block_device_path << " image corrupted, attempting to recover...";
|
2015-12-10 00:29:45 +01:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
// When opened with O_RDWR, libfec rewrites corrupted blocks when they are read
|
2019-05-14 19:54:43 +02:00
|
|
|
fec::io fh(block_device_path, O_RDWR);
|
2015-06-25 11:25:36 +02:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
if (!fh) {
|
2019-05-14 19:54:43 +02:00
|
|
|
ErrorAbort(state, kLibfecFailure, "fec_open \"%s\" failed: %s", block_device_path.c_str(),
|
2017-03-31 10:18:13 +02:00
|
|
|
strerror(errno));
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2015-06-25 11:25:36 +02:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
if (!fh.has_ecc() || !fh.has_verity()) {
|
|
|
|
ErrorAbort(state, kLibfecFailure, "unable to use metadata to correct errors");
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2015-06-25 11:25:36 +02:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
fec_status status;
|
|
|
|
if (!fh.get_status(status)) {
|
|
|
|
ErrorAbort(state, kLibfecFailure, "failed to read FEC status");
|
|
|
|
return StringValue("");
|
|
|
|
}
|
2015-06-25 11:25:36 +02:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
uint8_t buffer[BLOCKSIZE];
|
2018-08-28 19:09:13 +02:00
|
|
|
for (const auto& [begin, end] : rs) {
|
|
|
|
for (size_t j = begin; j < end; ++j) {
|
2017-03-31 10:18:13 +02:00
|
|
|
// Stay within the data area; libfec validates and corrects metadata on its own.
|
2017-03-31 01:57:29 +02:00
|
|
|
if (status.data_size <= static_cast<uint64_t>(j) * BLOCKSIZE) {
|
2017-03-31 10:18:13 +02:00
|
|
|
continue;
|
|
|
|
}
|
2015-06-25 11:25:36 +02:00
|
|
|
|
2017-03-31 01:57:29 +02:00
|
|
|
if (fh.pread(buffer, BLOCKSIZE, static_cast<off64_t>(j) * BLOCKSIZE) != BLOCKSIZE) {
|
2017-03-31 10:18:13 +02:00
|
|
|
ErrorAbort(state, kLibfecFailure, "failed to recover %s (block %zu): %s",
|
2019-05-14 19:54:43 +02:00
|
|
|
block_device_path.c_str(), j, strerror(errno));
|
2017-03-31 10:18:13 +02:00
|
|
|
return StringValue("");
|
|
|
|
}
|
2015-06-25 11:25:36 +02:00
|
|
|
|
2017-03-31 10:18:13 +02:00
|
|
|
// If we want to be able to recover from a situation where rewriting a corrected
|
|
|
|
// block doesn't guarantee the same data will be returned when re-read later, we
|
|
|
|
// can save a copy of corrected blocks to /cache. Note:
|
|
|
|
//
|
|
|
|
// 1. The maximum space required from /cache is bounded by the maximum number of
|
|
|
|
// corrupted blocks we can correct. For RS(255, 253) and a 2 GiB partition,
|
|
|
|
// this would be ~16 MiB, for example.
|
|
|
|
//
|
|
|
|
// 2. To find out if this block was corrupted, call fec_get_status after each
|
|
|
|
// read and check if the errors field value has increased.
|
2015-06-25 11:25:36 +02:00
|
|
|
}
|
2017-03-31 10:18:13 +02:00
|
|
|
}
|
2019-05-14 19:54:43 +02:00
|
|
|
LOG(INFO) << "..." << block_device_path << " image recovered successfully.";
|
2017-03-31 10:18:13 +02:00
|
|
|
return StringValue("t");
|
2015-06-25 11:25:36 +02:00
|
|
|
}
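// Hypothetical edify usage (partition name and ranges are illustrative):
//
//   block_image_recover("vendor", "2,0,65536") ||
//       abort("vendor partition is unrecoverable.");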
|
|
|
|
|
2014-08-15 23:31:52 +02:00
|
|
|
void RegisterBlockImageFunctions() {
|
2017-03-31 10:18:13 +02:00
|
|
|
RegisterFunction("block_image_verify", BlockImageVerifyFn);
|
|
|
|
RegisterFunction("block_image_update", BlockImageUpdateFn);
|
|
|
|
RegisterFunction("block_image_recover", BlockImageRecoverFn);
|
|
|
|
RegisterFunction("check_first_block", CheckFirstBlockFn);
|
|
|
|
RegisterFunction("range_sha1", RangeSha1Fn);
|
2014-08-15 23:31:52 +02:00
|
|
|
}
|