diff --git a/tests/Android.mk b/tests/Android.mk
index 346873db..8b1dc109 100644
--- a/tests/Android.mk
+++ b/tests/Android.mk
@@ -160,6 +160,7 @@ LOCAL_STATIC_LIBRARIES := \
     libfec_rs \
     libsquashfs_utils \
     libcutils \
+    libbrotli \
     $(tune2fs_static_libraries)
 
 testdata_files := $(call find-subdir-files, testdata/*)
diff --git a/tests/component/updater_test.cpp b/tests/component/updater_test.cpp
index 0298a764..357a39ef 100644
--- a/tests/component/updater_test.cpp
+++ b/tests/component/updater_test.cpp
@@ -15,10 +15,12 @@
  */
 #include
+#include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -29,6 +31,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -576,4 +579,68 @@ TEST_F(UpdaterTest, new_data_short_write) {
   std::string script_exact_data = "block_image_update(\"" + std::string(update_file.path) +
       R"(", package_extract_file("transfer_list"), "exact_new_data", "patch_data"))";
   expect("t", script_exact_data.c_str(), kNoCause, &updater_info);
+  CloseArchive(handle);
+}
+
+TEST_F(UpdaterTest, brotli_new_data) {
+  // Create a zip file with new_data.
+  TemporaryFile zip_file;
+  FILE* zip_file_ptr = fdopen(zip_file.fd, "wb");
+  ZipWriter zip_writer(zip_file_ptr);
+
+  // Add a brotli compressed new data entry.
+  ASSERT_EQ(0, zip_writer.StartEntry("new.dat.br", 0));
+
+  auto generator = []() { return rand() % 128; };
+  // Generate 2048 blocks of random data.
+  std::string brotli_new_data;
+  brotli_new_data.reserve(4096 * 2048);
+  generate_n(back_inserter(brotli_new_data), 4096 * 2048, generator);
+
+  size_t encoded_size = BrotliEncoderMaxCompressedSize(brotli_new_data.size());
+  std::vector<uint8_t> encoded_data(encoded_size);
+  ASSERT_TRUE(BrotliEncoderCompress(
+      BROTLI_DEFAULT_QUALITY, BROTLI_DEFAULT_WINDOW, BROTLI_DEFAULT_MODE, brotli_new_data.size(),
+      reinterpret_cast<const uint8_t*>(brotli_new_data.data()), &encoded_size, encoded_data.data()));
+
+  ASSERT_EQ(0, zip_writer.WriteBytes(encoded_data.data(), encoded_size));
+  ASSERT_EQ(0, zip_writer.FinishEntry());
+  // Add dummy patch data.
+  ASSERT_EQ(0, zip_writer.StartEntry("patch_data", 0));
+  ASSERT_EQ(0, zip_writer.FinishEntry());
+
+  std::vector<std::string> transfer_list = {
+    "4", "2048", "0", "0", "new 4,0,512,512,1024", "new 2,1024,2048",
+  };
+  ASSERT_EQ(0, zip_writer.StartEntry("transfer_list", 0));
+  std::string commands = android::base::Join(transfer_list, '\n');
+  ASSERT_EQ(0, zip_writer.WriteBytes(commands.data(), commands.size()));
+  ASSERT_EQ(0, zip_writer.FinishEntry());
+  ASSERT_EQ(0, zip_writer.Finish());
+  ASSERT_EQ(0, fclose(zip_file_ptr));
+
+  MemMapping map;
+  ASSERT_TRUE(map.MapFile(zip_file.path));
+  ZipArchiveHandle handle;
+  ASSERT_EQ(0, OpenArchiveFromMemory(map.addr, map.length, zip_file.path, &handle));
+
+  // Set up the handler, command_pipe, patch offset & length.
+  UpdaterInfo updater_info;
+  updater_info.package_zip = handle;
+  TemporaryFile temp_pipe;
+  updater_info.cmd_pipe = fopen(temp_pipe.path, "wb");
+  updater_info.package_zip_addr = map.addr;
+  updater_info.package_zip_len = map.length;
+
+  // Check if we can decompress the new data correctly.
+  TemporaryFile update_file;
+  std::string script_new_data =
+      "block_image_update(\"" + std::string(update_file.path) +
+      R"(", package_extract_file("transfer_list"), "new.dat.br", "patch_data"))";
+  expect("t", script_new_data.c_str(), kNoCause, &updater_info);
+
+  std::string updated_content;
+  ASSERT_TRUE(android::base::ReadFileToString(update_file.path, &updated_content));
+  ASSERT_EQ(brotli_new_data, updated_content);
+  CloseArchive(handle);
 }
diff --git a/updater/Android.mk b/updater/Android.mk
index a113fe86..86dc48e3 100644
--- a/updater/Android.mk
+++ b/updater/Android.mk
@@ -47,6 +47,7 @@ updater_common_static_libraries := \
     libcrypto_utils \
     libcutils \
     libtune2fs \
+    libbrotli \
     $(tune2fs_static_libraries)
 
 # libupdater (static library)
diff --git a/updater/blockimg.cpp b/updater/blockimg.cpp
index df366b0b..2bec487f 100644
--- a/updater/blockimg.cpp
+++ b/updater/blockimg.cpp
@@ -44,6 +44,7 @@
 #include
 #include
 #include
+#include <brotli/decode.h>
 #include
 #include
 #include
@@ -149,40 +150,32 @@ static void allocate(size_t size, std::vector<uint8_t>& buffer) {
 class RangeSinkWriter {
  public:
   RangeSinkWriter(int fd, const RangeSet& tgt)
-      : fd_(fd), tgt_(tgt), next_range_(0), current_range_left_(0), bytes_written_(0) {
+      : fd_(fd),
+        tgt_(tgt),
+        next_range_(0),
+        current_range_left_(0),
+        bytes_written_(0) {
     CHECK_NE(tgt.size(), static_cast<size_t>(0));
   };
+  virtual ~RangeSinkWriter() {};
+
   bool Finished() const {
     return next_range_ == tgt_.size() && current_range_left_ == 0;
   }
 
-  size_t Write(const uint8_t* data, size_t size) {
+  // Returns the number of bytes consumed; 0 indicates a write failure.
+  virtual size_t Write(const uint8_t* data, size_t size) {
     if (Finished()) {
       LOG(ERROR) << "range sink write overrun; can't write " << size << " bytes";
       return 0;
     }
 
-    size_t written = 0;
+    size_t consumed = 0;
     while (size > 0) {
       // Move to the next range as needed.
-      if (current_range_left_ == 0) {
-        if (next_range_ < tgt_.size()) {
-          const Range& range = tgt_[next_range_];
-          off64_t offset = static_cast<off64_t>(range.first) * BLOCKSIZE;
-          current_range_left_ = (range.second - range.first) * BLOCKSIZE;
-          next_range_++;
-          if (!discard_blocks(fd_, offset, current_range_left_)) {
-            break;
-          }
-
-          if (!check_lseek(fd_, offset, SEEK_SET)) {
-            break;
-          }
-        } else {
-          // We can't write any more; return how many bytes have been written so far.
-          break;
-        }
-      }
+      if (!SeekToOutputRange()) {
+        break;
+      }
 
       size_t write_now = size;
@@ -198,21 +191,47 @@ class RangeSinkWriter {
       size -= write_now;
       current_range_left_ -= write_now;
 
-      written += write_now;
+      consumed += write_now;
     }
 
-    bytes_written_ += written;
-    return written;
+    bytes_written_ += consumed;
+    return consumed;
   }
 
   size_t BytesWritten() const {
     return bytes_written_;
   }
 
- private:
-  // The input data.
+ protected:
+  // Set up the output cursor, move to the next range if needed.
+  bool SeekToOutputRange() {
+    // We haven't finished the current range yet.
+    if (current_range_left_ != 0) {
+      return true;
+    }
+    // We can't write any more; let the write function return how many bytes have been written
+    // so far.
+    if (next_range_ >= tgt_.size()) {
+      return false;
+    }
+
+    const Range& range = tgt_[next_range_];
+    off64_t offset = static_cast<off64_t>(range.first) * BLOCKSIZE;
+    current_range_left_ = (range.second - range.first) * BLOCKSIZE;
+    next_range_++;
+
+    if (!discard_blocks(fd_, offset, current_range_left_)) {
+      return false;
+    }
+    if (!check_lseek(fd_, offset, SEEK_SET)) {
+      return false;
+    }
+    return true;
+  }
+
+  // The output file descriptor.
   int fd_;
-  // The destination for the data.
+  // The destination ranges for the data.
   const RangeSet& tgt_;
   // The next range that we should write to.
   size_t next_range_;
@@ -222,6 +241,75 @@ class RangeSinkWriter {
   size_t bytes_written_;
 };
 
+class BrotliNewDataWriter : public RangeSinkWriter {
+ public:
+  BrotliNewDataWriter(int fd, const RangeSet& tgt, BrotliDecoderState* state)
+      : RangeSinkWriter(fd, tgt), state_(state) {}
+
+  size_t Write(const uint8_t* data, size_t size) override {
+    if (Finished()) {
+      LOG(ERROR) << "Brotli new data write overrun; can't write " << size << " bytes";
+      return 0;
+    }
+    CHECK(state_ != nullptr);
+
+    size_t consumed = 0;
+    while (true) {
+      // Move to the next range as needed.
+      if (!SeekToOutputRange()) {
+        break;
+      }
+
+      size_t available_in = size;
+      size_t write_now = std::min<size_t>(32768, current_range_left_);
+      uint8_t buffer[write_now];
+
+      size_t available_out = write_now;
+      uint8_t* next_out = buffer;
+
+      // The brotli decoder will update |data|, |available_in|, |next_out| and |available_out|.
+      BrotliDecoderResult result = BrotliDecoderDecompressStream(
+          state_, &available_in, &data, &available_out, &next_out, nullptr);
+
+      // We don't have a way to recover from a decode error; report the failure.
+      if (result == BROTLI_DECODER_RESULT_ERROR) {
+        LOG(ERROR) << "Decompression failed with "
+                   << BrotliDecoderErrorString(BrotliDecoderGetErrorCode(state_));
+        return 0;
+      }
+
+      if (write_all(fd_, buffer, write_now - available_out) == -1) {
+        return 0;
+      }
+
+      LOG(DEBUG) << "bytes written: " << write_now - available_out << ", bytes consumed "
+                 << size - available_in << ", decoder status " << result;
+
+      // Update the total bytes written to output by the current writer; this is different from the
+      // consumed input bytes.
+      bytes_written_ += write_now - available_out;
+      current_range_left_ -= (write_now - available_out);
+      consumed += (size - available_in);
+
+      // Update the remaining size. The input data ptr is already updated by the brotli decoder
+      // function.
+      size = available_in;
+
+      // Stop when the decoder is done, or when it needs more input and we have consumed everything.
+      if (result == BROTLI_DECODER_RESULT_SUCCESS ||
+          (result == BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT && size == 0)) {
+        break;
+      }
+    }
+
+    return consumed;
+  }
+
+ private:
+  // Pointer to the decoder state. (Initialized by PerformBlockImageUpdate.)
+  BrotliDecoderState* state_;
+};
+
 /**
  * All of the data for all the 'new' transfers is contained in one file in the update package,
  * concatenated together in the order in which transfers.list will need it. We want to stream it out
@@ -243,8 +331,10 @@ class RangeSinkWriter {
 struct NewThreadInfo {
   ZipArchiveHandle za;
   ZipEntry entry;
+  bool brotli_compressed;
 
-  RangeSinkWriter* writer;
+  std::unique_ptr<RangeSinkWriter> writer;
+  BrotliDecoderState* brotli_decoder_state;
 
   bool receiver_available;
   pthread_mutex_t mu;
@@ -264,9 +354,16 @@ static bool receive_new_data(const uint8_t* data, size_t size, void* cookie) {
     // At this point nti->writer is set, and we own it. The main thread is waiting for it to
     // disappear from nti.
-    size_t written = nti->writer->Write(data, size);
-    data += written;
-    size -= written;
+    size_t consumed = nti->writer->Write(data, size);
+
+    // We encounter a fatal error if we fail to consume any input bytes. If this happens, abort the
+    // extraction.
+    if (consumed == 0) {
+      LOG(ERROR) << "Failed to process " << size << " input bytes.";
+      return false;
+    }
+    data += consumed;
+    size -= consumed;
 
     if (nti->writer->Finished()) {
       // We have written all the bytes desired by this writer.
@@ -1142,9 +1239,13 @@ static int PerformCommandNew(CommandParameters& params) {
   if (params.canwrite) {
     LOG(INFO) << " writing " << tgt.blocks() << " blocks of new data";
 
-    RangeSinkWriter writer(params.fd, tgt);
     pthread_mutex_lock(&params.nti.mu);
-    params.nti.writer = &writer;
+    if (params.nti.brotli_compressed) {
+      params.nti.writer =
+          std::make_unique<BrotliNewDataWriter>(params.fd, tgt, params.nti.brotli_decoder_state);
+    } else {
+      params.nti.writer = std::make_unique<RangeSinkWriter>(params.fd, tgt);
+    }
     pthread_cond_broadcast(&params.nti.cv);
 
     while (params.nti.writer != nullptr) {
@@ -1384,6 +1485,12 @@ static Value* PerformBlockImageUpdate(const char* name, State* state,
   if (params.canwrite) {
     params.nti.za = za;
     params.nti.entry = new_entry;
+    // The entry is compressed by brotli if it has a '.br' extension.
+    params.nti.brotli_compressed = android::base::EndsWith(new_data_fn->data, ".br");
+    if (params.nti.brotli_compressed) {
+      // Initialize the brotli decoder state.
+      params.nti.brotli_decoder_state = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr);
+    }
     params.nti.receiver_available = true;
 
     pthread_mutex_init(&params.nti.mu, nullptr);
@@ -1526,6 +1633,10 @@ pbiudone:
   }
   // params.fd will be automatically closed because it's a unique_fd.
 
+  if (params.nti.brotli_decoder_state != nullptr) {
+    BrotliDecoderDestroyInstance(params.nti.brotli_decoder_state);
+  }
+
  // Only delete the stash if the update cannot be resumed, or it's a verification run and we
 // created the stash.
 if (params.isunresumable || (!params.canwrite && params.createdstash)) {
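
For reference, the streaming decode pattern that BrotliNewDataWriter::Write() relies on can be exercised outside the updater. The sketch below is illustrative only; it is not part of this change, and the payload size, chunk size, and variable names are made up. It round-trips a buffer through the one-shot BrotliEncoderCompress() (as the brotli_new_data test does) and the streaming BrotliDecoderDecompressStream(), draining the output in 32 KiB chunks the same way the writer drains into each target range. Building it requires linking against libbrotli, which is what the Android.mk changes above add for the updater and its tests.

    #include <brotli/decode.h>
    #include <brotli/encode.h>

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    int main() {
      // Sample payload to round-trip (the updater test uses 2048 random 4096-byte blocks instead).
      std::vector<uint8_t> original(1024 * 1024, 0x5a);

      // One-shot compression, as in the brotli_new_data test above.
      size_t encoded_size = BrotliEncoderMaxCompressedSize(original.size());
      std::vector<uint8_t> encoded(encoded_size);
      if (!BrotliEncoderCompress(BROTLI_DEFAULT_QUALITY, BROTLI_DEFAULT_WINDOW, BROTLI_DEFAULT_MODE,
                                 original.size(), original.data(), &encoded_size, encoded.data())) {
        return 1;
      }

      // Streaming decompression: hand over all of the input once, then keep draining the output in
      // 32 KiB chunks until the decoder reports success, mirroring BrotliNewDataWriter::Write().
      BrotliDecoderState* state = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr);
      const uint8_t* next_in = encoded.data();
      size_t available_in = encoded_size;
      std::vector<uint8_t> decoded;

      BrotliDecoderResult result = BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT;
      while (result != BROTLI_DECODER_RESULT_SUCCESS) {
        uint8_t buffer[32768];
        size_t available_out = sizeof(buffer);
        uint8_t* next_out = buffer;

        result = BrotliDecoderDecompressStream(state, &available_in, &next_in, &available_out,
                                               &next_out, nullptr);
        if (result == BROTLI_DECODER_RESULT_ERROR ||
            result == BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT) {
          // A decode error is unrecoverable; NEEDS_MORE_INPUT here means the stream was truncated,
          // since the whole input has already been provided.
          BrotliDecoderDestroyInstance(state);
          return 1;
        }
        decoded.insert(decoded.end(), buffer, buffer + (sizeof(buffer) - available_out));
      }
      BrotliDecoderDestroyInstance(state);

      return decoded == original ? 0 : 1;
    }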