platform_system_core/logd/SerializedLogChunk.cpp
Tom Cherry 59caa7a045 logd: always compress SerializedLogChunk in FinishWriting()
When calculating the space used for pruning, the compressed size is
used for chunks that have been compressed; otherwise the uncompressed
size is used.  This is intended to reach a steady state where 1/4 of
the log buffer is the uncompressed log chunk that is actively being
written to and the other 3/4 is compressed logs.

If we wait until no readers reference a log chunk before compressing
it, we end up with two uncompressed chunks (the one that was just
filled and that readers are still referencing, and the new one that
was allocated to hold the most recent logs), which take up 1/2 of the
log buffer's allotted size and thus cause prune to delete more
compressed logs than it should.

Instead, we should always compress log chunks in FinishWriting(),
such that the compressed size is used for every chunk except the one
that is actively being written to.

Logs decompressed for readers are ephemeral by nature and thus don't
add to the log buffer size for pruning.

Test: observe that log buffers can be filled in the presence of a reader.
Change-Id: Ie21ccff032e41c4a0e51710cc435c5ab316563cb
2020-07-16 20:46:14 -07:00
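For context, FinishWriting() itself is defined inline in
SerializedLogChunk.h rather than in this file.  A minimal sketch of
the behavior described above, assuming the inline definition from the
header, looks like this:

void FinishWriting() {
    writer_active_ = false;
    Compress();
    // A reader may still hold a reference; keep the uncompressed
    // buffer alive until the last reader detaches.
    if (reader_ref_count_ == 0) {
        contents_.Resize(0);
    }
}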


/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "SerializedLogChunk.h"
#include <android-base/logging.h>
#include "CompressionEngine.h"
SerializedLogChunk::~SerializedLogChunk() {
    CHECK_EQ(reader_ref_count_, 0U);
}
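// Compresses contents_ in the range [0, write_offset_) into compressed_log_.  A chunk is
// compressed at most once; subsequent calls are no-ops.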
void SerializedLogChunk::Compress() {
    if (compressed_log_.size() == 0) {
        CompressionEngine::GetInstance().Compress(contents_, write_offset_, compressed_log_);
        LOG(INFO) << "Compressed Log, buffer max size: " << contents_.size()
                  << " size used: " << write_offset_
                  << " compressed size: " << compressed_log_.size();
    }
}
// TODO: Develop a better reference counting strategy to guard against the case where the writer is
// much faster than the reader, and we needlessly compress / decompress the logs.
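// The first reader to attach to a chunk whose writer has finished must decompress
// compressed_log_ back into contents_ before its entries can be read.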
void SerializedLogChunk::IncReaderRefCount() {
    if (++reader_ref_count_ != 1 || writer_active_) {
        return;
    }
    contents_.Resize(write_offset_);
    CompressionEngine::GetInstance().Decompress(compressed_log_, contents_);
}
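// When the last reader detaches and the writer has already finished, the decompressed copy is
// discarded; the compressed copy made in FinishWriting() remains the chunk's backing store.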
void SerializedLogChunk::DecReaderRefCount() {
    CHECK_NE(reader_ref_count_, 0U);
    if (--reader_ref_count_ != 0) {
        return;
    }
    if (!writer_active_) {
        contents_.Resize(0);
    }
}
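// Removes all entries with the given UID by compacting the surviving entries in place, then
// recompresses the result.  Returns true if the chunk is left empty and can be freed.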
bool SerializedLogChunk::ClearUidLogs(uid_t uid, log_id_t log_id, LogStatistics* stats) {
    CHECK_EQ(reader_ref_count_, 0U);
    if (write_offset_ == 0) {
        return true;
    }

    IncReaderRefCount();

    int read_offset = 0;
    int new_write_offset = 0;
    while (read_offset < write_offset_) {
        const auto* entry = log_entry(read_offset);
        if (entry->uid() == uid) {
            read_offset += entry->total_len();
            if (stats != nullptr) {
                stats->Subtract(entry->ToLogStatisticsElement(log_id));
            }
            continue;
        }
        size_t entry_total_len = entry->total_len();
        if (read_offset != new_write_offset) {
            memmove(contents_.data() + new_write_offset, contents_.data() + read_offset,
                    entry_total_len);
        }
        read_offset += entry_total_len;
        new_write_offset += entry_total_len;
    }

    if (new_write_offset == 0) {
        DecReaderRefCount();
        return true;
    }

    // Clear the old compressed logs and set write_offset_ appropriately to compress the new
    // partially cleared log.
    if (new_write_offset != write_offset_) {
        compressed_log_.Resize(0);
        write_offset_ = new_write_offset;
        Compress();
    }

    DecReaderRefCount();
    return false;
}
bool SerializedLogChunk::CanLog(size_t len) {
    return write_offset_ + len <= contents_.size();
}
SerializedLogEntry* SerializedLogChunk::Log(uint64_t sequence, log_time realtime, uid_t uid,
                                            pid_t pid, pid_t tid, const char* msg, uint16_t len) {
    auto new_log_address = contents_.data() + write_offset_;
    auto* entry = new (new_log_address) SerializedLogEntry(uid, pid, tid, sequence, realtime, len);
    memcpy(entry->msg(), msg, len);
    write_offset_ += entry->total_len();
    highest_sequence_number_ = sequence;
    return entry;
}
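// A sketch of the writer-side flow, for illustration only; the real call sites live in
// SerializedLogBuffer.cpp, and total_len below (entry header plus message) is an assumption:
//
//   if (!chunk->CanLog(total_len)) {
//       chunk->FinishWriting();  // compress immediately, per this change
//       // ... allocate a fresh chunk to write into ...
//   }
//   chunk->Log(sequence, realtime, uid, pid, tid, msg, len);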