59caa7a045
When calculating the space used for pruning, if a log chunk is compressed, that size is used otherwise the uncompressed size is used. This is intended to reach a steady state where 1/4 of the log buffer is the uncompressed log chunk that is being written to and the other 3/4 of the log buffer is compressed logs. If we wait until there are no readers referencing the log chunk before compressing it, we end up with 2 uncompressed logs (the one that was just filled, that readers are still referencing, and the new one that was allocated to fit the most recent log), which take up 1/2 of the log buffer's allotted size and will thus cause prune to delete more compressed logs than it should. Instead, we should always compress the log chunks in FinishWriting() such that the compressed size will always be used for log chunks other than the one that is actively being written to. Decompressed logs due to readers are ephemeral by their nature and thus don't add to the log buffer size for pruning. Test: observe that log buffers can be filled in the presence of a reader. Change-Id: Ie21ccff032e41c4a0e51710cc435c5ab316563cb
158 lines
5.2 KiB
C++
158 lines
5.2 KiB
C++
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#include "SerializedFlushToState.h"
|
|
|
|
#include <android-base/logging.h>
|
|
|
|
// Constructs a flush state starting at sequence number `start` for the buffers
// selected by `log_mask`.  No LogPosition is created yet for any buffer; each
// enabled buffer is marked so that the next CheckForNewLogs() call creates its
// position lazily on first read.
SerializedFlushToState::SerializedFlushToState(uint64_t start, LogMask log_mask)
    : FlushToState(start, log_mask) {
    log_id_for_each(i) {
        if ((1 << i) & log_mask) {
            logs_needed_from_next_position_[i] = true;
        }
    }
}
|
|
|
|
// Releases the reader reference held on each chunk that this flush state is
// still pointing at, so those chunks are again eligible for compression and
// pruning.
SerializedFlushToState::~SerializedFlushToState() {
    log_id_for_each(i) {
        auto& position = log_positions_[i];
        if (!position) {
            continue;
        }
        position->buffer_it->DecReaderRefCount();
    }
}
|
|
|
|
// Creates log_positions_[log_id], pointing at the first log entry in
// logs_[log_id] whose sequence number is >= start().  A reader reference is
// taken on the chosen chunk so it stays readable while we point into it.
// Precondition: logs_[log_id] must be non-empty.
void SerializedFlushToState::CreateLogPosition(log_id_t log_id) {
    CHECK(!logs_[log_id].empty());
    LogPosition log_position;
    auto it = logs_[log_id].begin();
    // Skip whole chunks whose newest entry is still older than start().
    while (it != logs_[log_id].end() && start() > it->highest_sequence_number()) {
        ++it;
    }
    // If every chunk's entries are older than start(), use the last (newest)
    // chunk; the offset scan below will then land at its write_offset().
    if (it == logs_[log_id].end()) {
        --it;
    }
    it->IncReaderRefCount();
    log_position.buffer_it = it;

    // Find the offset of the first log with sequence number >= start().
    int read_offset = 0;
    while (read_offset < it->write_offset()) {
        const auto* entry = it->log_entry(read_offset);
        if (entry->sequence() >= start()) {
            break;
        }
        // Entries are laid out back to back; advance by this entry's full size.
        read_offset += entry->total_len();
    }
    log_position.read_offset = read_offset;

    log_positions_[log_id].emplace(log_position);
}
|
|
|
|
// Pushes the next unread entry of buffer `log_id` onto the min heap, advancing
// to the next chunk if the current one has been fully read.  If no unread
// entry exists yet, sets logs_needed_from_next_position_[log_id] so the heap
// entry is added once new logs arrive.  Requires log_positions_[log_id] to
// have been created (see CreateLogPosition()).
void SerializedFlushToState::AddMinHeapEntry(log_id_t log_id) {
    auto& buffer_it = log_positions_[log_id]->buffer_it;
    auto read_offset = log_positions_[log_id]->read_offset;

    // If there is another log to read in this buffer, add it to the min heap.
    if (read_offset < buffer_it->write_offset()) {
        auto* entry = buffer_it->log_entry(read_offset);
        min_heap_.emplace(log_id, entry);
    } else if (read_offset == buffer_it->write_offset()) {
        // If there are no more logs to read in this buffer and it's the last buffer, then
        // set logs_needed_from_next_position_ to wait until more logs get logged.
        if (buffer_it == std::prev(logs_[log_id].end())) {
            logs_needed_from_next_position_[log_id] = true;
        } else {
            // Otherwise, if there is another buffer piece, move to that and do the same check.
            // Swap our reader reference from the exhausted chunk to the next one
            // before advancing, so each chunk's ref count stays balanced.
            buffer_it->DecReaderRefCount();
            ++buffer_it;
            buffer_it->IncReaderRefCount();
            log_positions_[log_id]->read_offset = 0;
            if (buffer_it->write_offset() == 0) {
                // The next chunk exists but has no entries yet; wait for logs.
                logs_needed_from_next_position_[log_id] = true;
            } else {
                auto* entry = buffer_it->log_entry(0);
                min_heap_.emplace(log_id, entry);
            }
        }
    } else {
        // read_offset > buffer_it->write_offset() should never happen.
        CHECK(false);
    }
}
|
|
|
|
// For every buffer flagged as waiting for logs, creates its LogPosition if
// needed (skipping buffers that are still empty) and tries to queue its next
// unread entry on the min heap.
void SerializedFlushToState::CheckForNewLogs() {
    log_id_for_each(i) {
        if (!logs_needed_from_next_position_[i]) continue;
        if (!log_positions_[i]) {
            // Nothing has been logged to this buffer yet; keep waiting.
            if (logs_[i].empty()) continue;
            CreateLogPosition(i);
        }
        logs_needed_from_next_position_[i] = false;
        // If it wasn't possible to insert, logs_needed_from_next_position will be set back to true.
        AddMinHeapEntry(i);
    }
}
|
|
|
|
// Removes and returns the globally-oldest unread log entry from the min heap,
// advancing that buffer's read offset past the returned entry.
MinHeapElement SerializedFlushToState::PopNextUnreadLog() {
    auto element = min_heap_.top();
    min_heap_.pop();

    // Step past the entry being handed out; the follow-up heap entry for this
    // buffer is added lazily by the next CheckForNewLogs() call.
    log_positions_[element.log_id]->read_offset += element.entry->total_len();
    logs_needed_from_next_position_[element.log_id] = true;

    return element;
}
|
|
|
|
// Called when `buffer_it` is about to be pruned from buffer `log_id`.  Drops
// this flush state's reference to the chunk (and any heap entry pointing into
// it) so the chunk can be safely deleted; the position is re-created from
// start() on the next read via CheckForNewLogs().
void SerializedFlushToState::Prune(log_id_t log_id,
                                   const std::list<SerializedLogChunk>::iterator& buffer_it) {
    // If we don't have a position for this log or if we're not referencing buffer_it, ignore.
    if (!log_positions_[log_id].has_value() || log_positions_[log_id]->buffer_it != buffer_it) {
        return;
    }

    // Decrease the ref count since we're deleting our reference.
    buffer_it->DecReaderRefCount();

    // Delete our position into the chunk being pruned.
    log_positions_[log_id].reset();

    // Remove the MinHeapElement referencing log_id, if it exists, but retain the others.
    // std::priority_queue has no erase, so drain it and re-add the survivors.
    std::vector<MinHeapElement> old_elements;
    while (!min_heap_.empty()) {
        auto& element = min_heap_.top();
        if (element.log_id != log_id) {
            old_elements.emplace_back(element);
        }
        min_heap_.pop();
    }
    for (auto&& element : old_elements) {
        min_heap_.emplace(element);
    }

    // Finally set logs_needed_from_next_position_, so CheckForNewLogs() will re-create the
    // log_position_ object during the next read.
    logs_needed_from_next_position_[log_id] = true;
}
|