/*
 * Copyright (C) 2012-2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// for manual checking of stale entries during LogBuffer::erase()
//#define DEBUG_CHECK_FOR_STALE_ENTRIES

#include <ctype.h>
#include <endian.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/user.h>
#include <time.h>
#include <unistd.h>

#include <unordered_map>
#include <utility>

#include <cutils/properties.h>
#include <private/android_logger.h>

#include "LogBuffer.h"
#include "LogKlog.h"
#include "LogReader.h"
#include "LogUtils.h"

#ifndef __predict_false
#define __predict_false(exp) __builtin_expect((exp) != 0, 0)
#endif

// Default
#define log_buffer_size(id) mMaxSize[id]

void LogBuffer::init() {
    log_id_for_each(i) {
        if (setSize(i, __android_logger_get_buffer_size(i))) {
            setSize(i, LOG_BUFFER_MIN_SIZE);
        }
    }
    bool lastMonotonic = monotonic;
    monotonic = android_log_clockid() == CLOCK_MONOTONIC;
    if (lastMonotonic != monotonic) {
        //
        // Fixup all timestamps, may not be 100% accurate, but better than
        // throwing what we have away when we get 'surprised' by a change.
        // In-place element fixup so no need to check reader-lock. Entries
        // should already be in timestamp order, but we could end up with a
        // few out-of-order entries if new monotonics come in before we
        // are notified of the reinit change in status. A typical example would
        // be:
        //  --------- beginning of system
        //      10.494082   184   201 D Cryptfs : Just triggered post_fs_data
        //  --------- beginning of kernel
        //       0.000000     0     0 I         : Initializing cgroup subsys
        // as the act of mounting /data would trigger persist.logd.timestamp to
        // be corrected. 1/30 corner case YMMV.
        //
        rdlock();
        LogBufferElementCollection::iterator it = mLogElements.begin();
        while (it != mLogElements.end()) {
            LogBufferElement* e = *it;
            if (monotonic) {
                if (!android::isMonotonic(e->mRealTime)) {
                    LogKlog::convertRealToMonotonic(e->mRealTime);
                    if ((e->mRealTime.tv_nsec % 1000) == 0) {
                        e->mRealTime.tv_nsec++;
                    }
                }
            } else {
                if (android::isMonotonic(e->mRealTime)) {
                    LogKlog::convertMonotonicToReal(e->mRealTime);
                    if ((e->mRealTime.tv_nsec % 1000) == 0) {
                        e->mRealTime.tv_nsec++;
                    }
                }
            }
            ++it;
        }
        unlock();
    }

    // Release any sleeping reader threads to dump their current content.
    LogTimeEntry::wrlock();

    LastLogTimes::iterator times = mTimes.begin();
    while (times != mTimes.end()) {
        LogTimeEntry* entry = times->get();
        entry->triggerReader_Locked();
        times++;
    }

    LogTimeEntry::unlock();
}

LogBuffer::LogBuffer(LastLogTimes* times, LogTags* tags, PruneList* prune)
    : monotonic(android_log_clockid() == CLOCK_MONOTONIC),
      mTimes(*times),
      tags_(tags),
      prune_(prune) {
    pthread_rwlock_init(&mLogElementsLock, nullptr);

    log_id_for_each(i) {
        lastLoggedElements[i] = nullptr;
        droppedElements[i] = nullptr;
    }

    init();
}

LogBuffer::~LogBuffer() {
    log_id_for_each(i) {
        delete lastLoggedElements[i];
        delete droppedElements[i];
    }
}

LogBufferElementCollection::iterator LogBuffer::GetOldest(log_id_t log_id) {
    auto it = mLogElements.begin();
    if (oldest_[log_id]) {
        it = *oldest_[log_id];
    }
    while (it != mLogElements.end() && (*it)->getLogId() != log_id) {
        it++;
    }
    if (it != mLogElements.end()) {
        oldest_[log_id] = it;
    }
    return it;
}

enum match_type { DIFFERENT, SAME, SAME_LIBLOG };

static enum match_type identical(LogBufferElement* elem,
                                 LogBufferElement* last) {
    // is it mostly identical?
    //  if (!elem) return DIFFERENT;
    ssize_t lenl = elem->getMsgLen();
    if (lenl <= 0) return DIFFERENT;  // value if this represents a chatty elem
    //  if (!last) return DIFFERENT;
    ssize_t lenr = last->getMsgLen();
    if (lenr <= 0) return DIFFERENT;  // value if this represents a chatty elem
    //  if (elem->getLogId() != last->getLogId()) return DIFFERENT;
    if (elem->getUid() != last->getUid()) return DIFFERENT;
    if (elem->getPid() != last->getPid()) return DIFFERENT;
    if (elem->getTid() != last->getTid()) return DIFFERENT;

    // last is more than a minute old, stop squashing identical messages
    if (elem->getRealTime().nsec() >
        (last->getRealTime().nsec() + 60 * NS_PER_SEC))
        return DIFFERENT;

    // Identical message
    const char* msgl = elem->getMsg();
    const char* msgr = last->getMsg();
    if (lenl == lenr) {
        if (!fastcmp<memcmp>(msgl, msgr, lenl)) return SAME;
        // liblog tagged messages (content gets summed)
        if ((elem->getLogId() == LOG_ID_EVENTS) &&
            (lenl == sizeof(android_log_event_int_t)) &&
            !fastcmp<memcmp>(msgl, msgr, sizeof(android_log_event_int_t) -
                                             sizeof(int32_t)) &&
            (elem->getTag() == LIBLOG_LOG_TAG)) {
            return SAME_LIBLOG;
        }
    }

    // audit message (except sequence number) identical?
    if (last->isBinary() &&
        (lenl > static_cast<ssize_t>(sizeof(android_log_event_string_t))) &&
        (lenr > static_cast<ssize_t>(sizeof(android_log_event_string_t)))) {
        if (fastcmp<memcmp>(msgl, msgr, sizeof(android_log_event_string_t) -
                                            sizeof(int32_t))) {
            return DIFFERENT;
        }
        msgl += sizeof(android_log_event_string_t);
        lenl -= sizeof(android_log_event_string_t);
        msgr += sizeof(android_log_event_string_t);
        lenr -= sizeof(android_log_event_string_t);
    }
    static const char avc[] = "): avc: ";
    const char* avcl = android::strnstr(msgl, lenl, avc);
    if (!avcl) return DIFFERENT;
    lenl -= avcl - msgl;
    const char* avcr = android::strnstr(msgr, lenr, avc);
    if (!avcr) return DIFFERENT;
    lenr -= avcr - msgr;
    if (lenl != lenr) return DIFFERENT;
    if (fastcmp<memcmp>(avcl + strlen(avc), avcr + strlen(avc),
                        lenl - strlen(avc))) {
        return DIFFERENT;
    }
    return SAME;
}

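// Editor's illustration (not part of the original source): for two
// LOG_ID_EVENTS entries carrying the LIBLOG_LOG_TAG counter with payloads 3
// and 5 from the same uid/pid/tid within a minute, identical() returns
// SAME_LIBLOG, and LogBuffer::log() below folds them into a single event
// whose payload is the sum 8 instead of emitting a chatty record.
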
int LogBuffer::log(log_id_t log_id, log_time realtime, uid_t uid, pid_t pid,
                   pid_t tid, const char* msg, uint16_t len) {
    if (log_id >= LOG_ID_MAX) {
        return -EINVAL;
    }

    // Slip the time by 1 nsec if the incoming lands on xxxxxx000 ns.
    // This prevents any chance that an outside source can request an
    // exact entry with time specified in ms or us precision.
    if ((realtime.tv_nsec % 1000) == 0) ++realtime.tv_nsec;
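    // e.g. (editor's illustration): an incoming realtime of 1234.567000000
    // becomes 1234.567000001, so a later search specified only to ms or us
    // precision can never match an exact entry.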

    LogBufferElement* elem = new LogBufferElement(log_id, realtime, uid, pid, tid, msg, len);

    // b/137093665: don't coalesce security messages.
    if (log_id == LOG_ID_SECURITY) {
        wrlock();
        log(elem);
        unlock();

        return len;
    }

    int prio = ANDROID_LOG_INFO;
    const char* tag = nullptr;
    size_t tag_len = 0;
    if (log_id == LOG_ID_EVENTS || log_id == LOG_ID_STATS) {
        tag = tags_->tagToName(elem->getTag());
        if (tag) {
            tag_len = strlen(tag);
        }
    } else {
        prio = *msg;
        tag = msg + 1;
        tag_len = strnlen(tag, len - 1);
    }
    if (!__android_log_is_loggable_len(prio, tag, tag_len, ANDROID_LOG_VERBOSE)) {
        // Log traffic received to total
        wrlock();
        stats.addTotal(elem);
        unlock();
        delete elem;
        return -EACCES;
    }

    wrlock();
    LogBufferElement* currentLast = lastLoggedElements[log_id];
    if (currentLast) {
        LogBufferElement* dropped = droppedElements[log_id];
        uint16_t count = dropped ? dropped->getDropped() : 0;
        //
        // State Init
        //     incoming:
        //         dropped = nullptr
        //         currentLast = nullptr;
        //         elem = incoming message
        //     outgoing:
        //         dropped = nullptr -> State 0
        //         currentLast = copy of elem
        //         log elem
        // State 0
        //     incoming:
        //         count = 0
        //         dropped = nullptr
        //         currentLast = copy of last message
        //         elem = incoming message
        //     outgoing: if match != DIFFERENT
        //         dropped = copy of first identical message -> State 1
        //         currentLast = reference to elem
        //     break: if match == DIFFERENT
        //         dropped = nullptr -> State 0
        //         delete copy of last message (incoming currentLast)
        //         currentLast = copy of elem
        //         log elem
        // State 1
        //     incoming:
        //         count = 0
        //         dropped = copy of first identical message
        //         currentLast = reference to last held-back incoming
        //                       message
        //         elem = incoming message
        //     outgoing: if match == SAME
        //         delete copy of first identical message (dropped)
        //         dropped = reference to last held-back incoming
        //                   message set to chatty count of 1 -> State 2
        //         currentLast = reference to elem
        //     outgoing: if match == SAME_LIBLOG
        //         dropped = copy of first identical message -> State 1
        //         take sum of currentLast and elem
        //         if sum overflows:
        //             log currentLast
        //             currentLast = reference to elem
        //         else
        //             delete currentLast
        //             currentLast = reference to elem, sum liblog.
        //     break: if match == DIFFERENT
        //         delete dropped
        //         dropped = nullptr -> State 0
        //         log reference to last held-back (currentLast)
        //         currentLast = copy of elem
        //         log elem
        // State 2
        //     incoming:
        //         count = chatty count
        //         dropped = chatty message holding count
        //         currentLast = reference to last held-back incoming
        //                       message.
        //         elem = incoming message
        //     outgoing: if match != DIFFERENT
        //         delete chatty message holding count
        //         dropped = reference to last held-back incoming
        //                   message, set to chatty count + 1
        //         currentLast = reference to elem
        //     break: if match == DIFFERENT
        //         log dropped (chatty message)
        //         dropped = nullptr -> State 0
        //         log reference to last held-back (currentLast)
        //         currentLast = copy of elem
        //         log elem
        //
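        // Illustrative walk-through (editor's sketch, not original text):
        // four identical messages arriving in quick succession drive the
        // state machine Init -> 0 -> 1 -> 2 -> 2; the first copy is logged,
        // the newest copy is always held back in currentLast, and the middle
        // copies collapse into one chatty element whose drop count grows.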
        enum match_type match = identical(elem, currentLast);
        if (match != DIFFERENT) {
            if (dropped) {
                // Sum up liblog tag messages?
                if ((count == 0) /* at Pass 1 */ && (match == SAME_LIBLOG)) {
                    android_log_event_int_t* event =
                        reinterpret_cast<android_log_event_int_t*>(
                            const_cast<char*>(currentLast->getMsg()));
                    //
                    // To unit test, differentiate with something like:
                    // event->header.tag = htole32(CHATTY_LOG_TAG);
                    // here, then instead of delete currentLast below,
                    // log(currentLast) to see the incremental sums form.
                    //
                    uint32_t swab = event->payload.data;
                    unsigned long long total = htole32(swab);
                    event = reinterpret_cast<android_log_event_int_t*>(
                        const_cast<char*>(elem->getMsg()));
                    swab = event->payload.data;

                    lastLoggedElements[LOG_ID_EVENTS] = elem;
                    total += htole32(swab);
                    // check for overflow
                    if (total >= UINT32_MAX) {
                        log(currentLast);
                        unlock();
                        return len;
                    }
                    stats.addTotal(currentLast);
                    delete currentLast;
                    swab = total;
                    event->payload.data = htole32(swab);
                    unlock();
                    return len;
                }
                if (count == USHRT_MAX) {
                    log(dropped);
                    count = 1;
                } else {
                    delete dropped;
                    ++count;
                }
            }
            if (count) {
                stats.addTotal(currentLast);
                currentLast->setDropped(count);
            }
            droppedElements[log_id] = currentLast;
            lastLoggedElements[log_id] = elem;
            unlock();
            return len;
        }
        if (dropped) {         // State 1 or 2
            if (count) {       // State 2
                log(dropped);  // report chatty
            } else {           // State 1
                delete dropped;
            }
            droppedElements[log_id] = nullptr;
            log(currentLast);  // report last message in the series
        } else {               // State 0
            delete currentLast;
        }
    }
    lastLoggedElements[log_id] = new LogBufferElement(*elem);

    log(elem);
    unlock();

    return len;
}

// assumes LogBuffer::wrlock() held, owns elem, look after garbage collection
void LogBuffer::log(LogBufferElement* elem) {
    mLogElements.push_back(elem);
    stats.add(elem);
    maybePrune(elem->getLogId());
}

// Prune at most 10% of the log entries or maxPrune, whichever is less.
//
// LogBuffer::wrlock() must be held when this function is called.
void LogBuffer::maybePrune(log_id_t id) {
    size_t sizes = stats.sizes(id);
    unsigned long maxSize = log_buffer_size(id);
    if (sizes > maxSize) {
        size_t sizeOver = sizes - ((maxSize * 9) / 10);
        size_t elements = stats.realElements(id);
        size_t minElements = elements / 100;
        if (minElements < minPrune) {
            minElements = minPrune;
        }
        unsigned long pruneRows = elements * sizeOver / sizes;
        if (pruneRows < minElements) {
            pruneRows = minElements;
        }
        if (pruneRows > maxPrune) {
            pruneRows = maxPrune;
        }
        prune(id, pruneRows);
    }
}

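// Worked example (editor's illustration; the numbers are assumed, not taken
// from this file): with maxSize = 256 KiB, sizes = 260 KiB and elements =
// 2600, sizeOver = 260 KiB - 230.4 KiB ~= 30 KiB, so pruneRows ~=
// 2600 * 30 / 260 = 300 rows, which is then clamped into the
// [minElements, maxPrune] window before prune() runs.
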
LogBufferElementCollection::iterator LogBuffer::erase(
    LogBufferElementCollection::iterator it, bool coalesce) {
    LogBufferElement* element = *it;
    log_id_t id = element->getLogId();

    // Remove iterator references in the various lists that will become stale
    // after the element is erased from the main logging list.

    {  // start of scope for found iterator
        int key = ((id == LOG_ID_EVENTS) || (id == LOG_ID_SECURITY))
                      ? element->getTag()
                      : element->getUid();
        LogBufferIteratorMap::iterator found = mLastWorst[id].find(key);
        if ((found != mLastWorst[id].end()) && (it == found->second)) {
            mLastWorst[id].erase(found);
        }
    }

    {  // start of scope for pid found iterator
        // element->getUid() may not be AID_SYSTEM for next-best-watermark.
        // will not assume id != LOG_ID_EVENTS or LOG_ID_SECURITY for KISS and
        // long term code stability, find() check should be fast for those ids.
        LogBufferPidIteratorMap::iterator found =
            mLastWorstPidOfSystem[id].find(element->getPid());
        if ((found != mLastWorstPidOfSystem[id].end()) &&
            (it == found->second)) {
            mLastWorstPidOfSystem[id].erase(found);
        }
    }

    bool setLast[LOG_ID_MAX];
    bool doSetLast = false;
    log_id_for_each(i) { doSetLast |= setLast[i] = oldest_[i] && it == *oldest_[i]; }
#ifdef DEBUG_CHECK_FOR_STALE_ENTRIES
    LogBufferElementCollection::iterator bad = it;
    int key = ((id == LOG_ID_EVENTS) || (id == LOG_ID_SECURITY))
                  ? element->getTag()
                  : element->getUid();
#endif
    it = mLogElements.erase(it);
    if (doSetLast) {
        log_id_for_each(i) {
            if (setLast[i]) {
                if (__predict_false(it == mLogElements.end())) {
                    oldest_[i] = std::nullopt;
                } else {
                    oldest_[i] = it;  // Store the next iterator even if it does not correspond to
                                      // the same log_id, as a starting point for GetOldest().
                }
            }
        }
    }
#ifdef DEBUG_CHECK_FOR_STALE_ENTRIES
    log_id_for_each(i) {
        for (auto b : mLastWorst[i]) {
            if (bad == b.second) {
                android::prdebug("stale mLastWorst[%d] key=%d mykey=%d\n", i,
                                 b.first, key);
            }
        }
        for (auto b : mLastWorstPidOfSystem[i]) {
            if (bad == b.second) {
                android::prdebug("stale mLastWorstPidOfSystem[%d] pid=%d\n", i,
                                 b.first);
            }
        }
    }
#endif
    if (coalesce) {
        stats.erase(element);
    } else {
        stats.subtract(element);
    }
    delete element;

    return it;
}

// Define a temporary mechanism to report the last LogBufferElement pointer
// for the specified uid, pid and tid. Used below to help merge-sort when
// pruning for worst UID.
class LogBufferElementKey {
    const union {
        struct {
            uint32_t uid;
            uint16_t pid;
            uint16_t tid;
        } __packed;
        uint64_t value;
    } __packed;

   public:
    LogBufferElementKey(uid_t uid, pid_t pid, pid_t tid)
        : uid(uid), pid(pid), tid(tid) {
    }
    explicit LogBufferElementKey(uint64_t key) : value(key) {
    }

    uint64_t getKey() {
        return value;
    }
};

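// Editor's note (illustration, not in the original): the union above packs
// uid, pid and tid into a single 64-bit value, so (uid=1000, pid=1234,
// tid=1234) and (uid=1000, pid=1234, tid=1235) land under different
// unordered_map keys in LogBufferElementLast below; the exact bit layout
// follows the struct member order and host endianness.
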
class LogBufferElementLast {
    typedef std::unordered_map<uint64_t, LogBufferElement*> LogBufferElementMap;
    LogBufferElementMap map;

   public:
    bool coalesce(LogBufferElement* element, uint16_t dropped) {
        LogBufferElementKey key(element->getUid(), element->getPid(),
                                element->getTid());
        LogBufferElementMap::iterator it = map.find(key.getKey());
        if (it != map.end()) {
            LogBufferElement* found = it->second;
            uint16_t moreDropped = found->getDropped();
            if ((dropped + moreDropped) > USHRT_MAX) {
                map.erase(it);
            } else {
                found->setDropped(dropped + moreDropped);
                return true;
            }
        }
        return false;
    }

    void add(LogBufferElement* element) {
        LogBufferElementKey key(element->getUid(), element->getPid(),
                                element->getTid());
        map[key.getKey()] = element;
    }

    inline void clear() {
        map.clear();
    }

    void clear(LogBufferElement* element) {
        uint64_t current = element->getRealTime().nsec() - (EXPIRE_RATELIMIT * NS_PER_SEC);
        for (LogBufferElementMap::iterator it = map.begin(); it != map.end();) {
            LogBufferElement* mapElement = it->second;
            if (mapElement->getDropped() >= EXPIRE_THRESHOLD &&
                current > mapElement->getRealTime().nsec()) {
                it = map.erase(it);
            } else {
                ++it;
            }
        }
    }
};

// If the selected reader is blocking our pruning progress, decide on
// what kind of mitigation is necessary to unblock the situation.
void LogBuffer::kickMe(LogTimeEntry* me, log_id_t id, unsigned long pruneRows) {
    if (stats.sizes(id) > (2 * log_buffer_size(id))) {  // +100%
        // A misbehaving or slow reader has its connection
        // dropped if we hit too much memory pressure.
        android::prdebug("Kicking blocked reader, pid %d, from LogBuffer::kickMe()\n",
                         me->mClient->getPid());
        me->release_Locked();
    } else if (me->mTimeout.tv_sec || me->mTimeout.tv_nsec) {
        // Allow a blocked WRAP timeout reader to
        // trigger and start reporting the log data.
        me->triggerReader_Locked();
    } else {
        // tell slow reader to skip entries to catch up
        android::prdebug(
            "Skipping %lu entries from slow reader, pid %d, from LogBuffer::kickMe()\n",
            pruneRows, me->mClient->getPid());
        me->triggerSkip_Locked(id, pruneRows);
    }
}

// prune "pruneRows" of type "id" from the buffer.
//
// This garbage collection task is used to expire log entries. It is called to
// remove all logs (clear), all UID logs (unprivileged clear), or every
// 256 or 10% of the total logs (whichever is less) to prune the logs.
//
// First there is a prep phase where we discover the reader region lock that
// acts as a backstop to any pruning activity to stop there and go no further.
//
// There are three major pruning loops that follow. All expire from the oldest
// entries. Since there are multiple log buffers, the Android logging facility
// will appear to drop entries 'in the middle' when looking at multiple log
// sources and buffers. This effect is slightly more prominent when we prune
// the worst offender by logging source. Thus the logs slowly lose content
// and value as you move back in time. This is preferred since chatty sources
// invariably move the logs' value down faster as less chatty sources would be
// expired in the noise.
//
// The first loop performs blacklisting and worst offender pruning. It falls
// through when there are no notable worst offenders and we have not hit the
// region lock preventing further worst offender pruning. This loop also looks
// after managing the chatty log entries and merging to help provide
// statistical basis for blame. The chatty entries are not a notification of
// how many logs you may have, but instead represent how many logs you would
// have had in a virtual log buffer that is extended to cover all the in-memory
// logs without loss. They last much longer than the represented pruned logs
// since they get multiplied by the gains in the non-chatty log sources.
//
// The second loop gets complicated because an algorithm of watermarks and
// history is maintained to reduce the order and keep processing time
// down to a minimum at scale. These algorithms can be costly in the face
// of larger log buffers, or severely limited processing time granted to a
// background task at lowest priority.
//
// This second loop does straight-up expiration from the end of the logs
// (again, remember for the specified log buffer id) but does some whitelist
// preservation. Thus whitelist is a Hail Mary low priority, blacklists and
// spam filtration all take priority. This second loop also checks if a region
// lock is causing us to buffer too much in the logs to help the reader(s),
// and will tell the slowest reader thread to skip log entries, and if
// persistent and hits a further threshold, kill the reader thread.
//
// The third loop is optional, and only gets hit if there was a whitelist
// and more needs to be pruned against the backstop of the region lock.
//
// LogBuffer::wrlock() must be held when this function is called.
//
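// In outline (editor's summary of the comment above, not original text):
//
//   prune(id, rows):
//       find "oldest" = the reader whose region lock backstops pruning
//       loop 1: while rows remain, drop blacklisted and worst-offender
//               entries, folding runs of drops into chatty elements
//       loop 2: expire from the oldest end, skipping whitelisted entries,
//               mitigating blocked/slow readers via kickMe()
//       loop 3: only if a whitelist held entries back and rows still
//               remain, expire whitelisted entries too
//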
bool LogBuffer::prune(log_id_t id, unsigned long pruneRows, uid_t caller_uid) {
    LogTimeEntry* oldest = nullptr;
    bool busy = false;
    bool clearAll = pruneRows == ULONG_MAX;

    LogTimeEntry::rdlock();

    // Region locked?
    LastLogTimes::iterator times = mTimes.begin();
    while (times != mTimes.end()) {
        LogTimeEntry* entry = times->get();
        if (entry->isWatching(id) &&
            (!oldest || (oldest->mStart > entry->mStart) ||
             ((oldest->mStart == entry->mStart) &&
              (entry->mTimeout.tv_sec || entry->mTimeout.tv_nsec)))) {
            oldest = entry;
        }
        times++;
    }

    LogBufferElementCollection::iterator it;

    if (__predict_false(caller_uid != AID_ROOT)) {  // unlikely
        // Only here if clear all request from non system source, so chatty
        // filter logistics is not required.
        it = GetOldest(id);
        while (it != mLogElements.end()) {
            LogBufferElement* element = *it;

            if ((element->getLogId() != id) ||
                (element->getUid() != caller_uid)) {
                ++it;
                continue;
            }

            if (oldest && oldest->mStart <= element->getSequence()) {
                busy = true;
                kickMe(oldest, id, pruneRows);
                break;
            }

            it = erase(it);
            if (--pruneRows == 0) {
                break;
            }
        }
        LogTimeEntry::unlock();
        return busy;
    }

    // prune by worst offenders; by blacklist, UID, and by PID of system UID
    bool hasBlacklist = (id != LOG_ID_SECURITY) && prune_->naughty();
    while (!clearAll && (pruneRows > 0)) {
        // recalculate the worst offender on every batched pass
        int worst = -1;  // not valid for getUid() or getKey()
        size_t worst_sizes = 0;
        size_t second_worst_sizes = 0;
        pid_t worstPid = 0;  // POSIX guarantees PID != 0

        if (worstUidEnabledForLogid(id) && prune_->worstUidEnabled()) {
            // Calculate threshold as 12.5% of available storage
            size_t threshold = log_buffer_size(id) / 8;

            if ((id == LOG_ID_EVENTS) || (id == LOG_ID_SECURITY)) {
                stats.sortTags(AID_ROOT, (pid_t)0, 2, id)
                    .findWorst(worst, worst_sizes, second_worst_sizes,
                               threshold);
                // per-pid filter for AID_SYSTEM sources is too complex
            } else {
                stats.sort(AID_ROOT, (pid_t)0, 2, id)
                    .findWorst(worst, worst_sizes, second_worst_sizes,
                               threshold);

                if ((worst == AID_SYSTEM) && prune_->worstPidOfSystemEnabled()) {
                    stats.sortPids(worst, (pid_t)0, 2, id)
                        .findWorst(worstPid, worst_sizes, second_worst_sizes);
                }
            }
        }

        // skip if we have neither worst nor naughty filters
        if ((worst == -1) && !hasBlacklist) {
            break;
        }

        bool kick = false;
        bool leading = true;  // true if starting from the oldest log entry, false if starting from
                              // a specific chatty entry.
        // Perform at least one mandatory garbage collection cycle in following
        // - clear leading chatty tags
        // - coalesce chatty tags
        // - check age-out of preserved logs
        bool gc = pruneRows <= 1;
        if (!gc && (worst != -1)) {
            {  // begin scope for worst found iterator
                LogBufferIteratorMap::iterator found =
                    mLastWorst[id].find(worst);
                if ((found != mLastWorst[id].end()) &&
                    (found->second != mLogElements.end())) {
                    leading = false;
                    it = found->second;
                }
            }
            if (worstPid) {  // begin scope for pid worst found iterator
                // FYI: worstPid only set if !LOG_ID_EVENTS and
                // !LOG_ID_SECURITY, not going to make that assumption ...
                LogBufferPidIteratorMap::iterator found =
                    mLastWorstPidOfSystem[id].find(worstPid);
                if ((found != mLastWorstPidOfSystem[id].end()) &&
                    (found->second != mLogElements.end())) {
                    leading = false;
                    it = found->second;
                }
            }
        }
        if (leading) {
            it = GetOldest(id);
        }
        static const timespec too_old = { EXPIRE_HOUR_THRESHOLD * 60 * 60, 0 };
        LogBufferElementCollection::iterator lastt;
        lastt = mLogElements.end();
        --lastt;
        LogBufferElementLast last;
        while (it != mLogElements.end()) {
            LogBufferElement* element = *it;

            if (oldest && oldest->mStart <= element->getSequence()) {
                busy = true;
                // Do not let chatty eliding trigger any reader mitigation
                break;
            }

            if (element->getLogId() != id) {
                ++it;
                continue;
            }
            // below this point element->getLogId() == id

            uint16_t dropped = element->getDropped();

            // remove any leading drops
            if (leading && dropped) {
                it = erase(it);
                continue;
            }

            if (dropped && last.coalesce(element, dropped)) {
                it = erase(it, true);
                continue;
            }

            int key = ((id == LOG_ID_EVENTS) || (id == LOG_ID_SECURITY))
                          ? element->getTag()
                          : element->getUid();

            if (hasBlacklist && prune_->naughty(element)) {
                last.clear(element);
                it = erase(it);
                if (dropped) {
                    continue;
                }

                pruneRows--;
                if (pruneRows == 0) {
                    break;
                }

                if (key == worst) {
                    kick = true;
                    if (worst_sizes < second_worst_sizes) {
                        break;
                    }
                    worst_sizes -= element->getMsgLen();
                }
                continue;
            }

            if ((element->getRealTime() < ((*lastt)->getRealTime() - too_old)) ||
                (element->getRealTime() > (*lastt)->getRealTime())) {
                break;
            }

            if (dropped) {
                last.add(element);
                if (worstPid &&
                    ((!gc && (element->getPid() == worstPid)) ||
                     (mLastWorstPidOfSystem[id].find(element->getPid()) ==
                      mLastWorstPidOfSystem[id].end()))) {
                    // element->getUid() may not be AID_SYSTEM, next best
                    // watermark if current one empty. id is not LOG_ID_EVENTS
                    // or LOG_ID_SECURITY because of worstPid check.
                    mLastWorstPidOfSystem[id][element->getPid()] = it;
                }
                if ((!gc && !worstPid && (key == worst)) ||
                    (mLastWorst[id].find(key) == mLastWorst[id].end())) {
                    mLastWorst[id][key] = it;
                }
                ++it;
                continue;
            }

            if ((key != worst) ||
                (worstPid && (element->getPid() != worstPid))) {
                leading = false;
                last.clear(element);
                ++it;
                continue;
            }
            // key == worst below here
            // If worstPid set, then element->getPid() == worstPid below here

            pruneRows--;
            if (pruneRows == 0) {
                break;
            }

            kick = true;

            uint16_t len = element->getMsgLen();

            // do not create any leading drops
            if (leading) {
                it = erase(it);
            } else {
                stats.drop(element);
                element->setDropped(1);
                if (last.coalesce(element, 1)) {
                    it = erase(it, true);
                } else {
                    last.add(element);
                    if (worstPid &&
                        (!gc || (mLastWorstPidOfSystem[id].find(worstPid) ==
                                 mLastWorstPidOfSystem[id].end()))) {
                        // element->getUid() may not be AID_SYSTEM, next best
                        // watermark if current one empty. id is not
                        // LOG_ID_EVENTS or LOG_ID_SECURITY because of worstPid.
                        mLastWorstPidOfSystem[id][worstPid] = it;
                    }
                    if ((!gc && !worstPid) ||
                        (mLastWorst[id].find(worst) == mLastWorst[id].end())) {
                        mLastWorst[id][worst] = it;
                    }
                    ++it;
                }
            }
            if (worst_sizes < second_worst_sizes) {
                break;
            }
            worst_sizes -= len;
        }
        last.clear();

        if (!kick || !prune_->worstUidEnabled()) {
            break;  // the following loop will ask bad clients to skip/drop
        }
    }

    bool whitelist = false;
    bool hasWhitelist = (id != LOG_ID_SECURITY) && prune_->nice() && !clearAll;
    it = GetOldest(id);
    while ((pruneRows > 0) && (it != mLogElements.end())) {
        LogBufferElement* element = *it;

        if (element->getLogId() != id) {
            it++;
            continue;
        }

        if (oldest && oldest->mStart <= element->getSequence()) {
            busy = true;
            if (!whitelist) kickMe(oldest, id, pruneRows);
            break;
        }

        if (hasWhitelist && !element->getDropped() && prune_->nice(element)) {
            // WhiteListed
            whitelist = true;
            it++;
            continue;
        }

        it = erase(it);
        pruneRows--;
    }

    // Do not save the whitelist if we are reader range limited
    if (whitelist && (pruneRows > 0)) {
        it = GetOldest(id);
        while ((it != mLogElements.end()) && (pruneRows > 0)) {
            LogBufferElement* element = *it;

            if (element->getLogId() != id) {
                ++it;
                continue;
            }

            if (oldest && oldest->mStart <= element->getSequence()) {
                busy = true;
                kickMe(oldest, id, pruneRows);
                break;
            }

            it = erase(it);
            pruneRows--;
        }
    }

    LogTimeEntry::unlock();

    return (pruneRows > 0) && busy;
}

// clear all rows of type "id" from the buffer.
bool LogBuffer::clear(log_id_t id, uid_t uid) {
    bool busy = true;
    // If it takes more than 4 tries (seconds) to clear, then kill reader(s)
    for (int retry = 4;;) {
        if (retry == 1) {  // last pass
            // Check if it is still busy after the sleep, we say prune
            // one entry, not another clear run, so we are looking for
            // the quick side effect of the return value to tell us if
            // we have a _blocked_ reader.
            wrlock();
            busy = prune(id, 1, uid);
            unlock();
            // It is still busy, blocked reader(s), let's kill them all!
            // otherwise, let's be a good citizen and preserve the slow
            // readers and let the clear run (below) deal with determining
            // if we are still blocked and return an error code to caller.
            if (busy) {
                LogTimeEntry::wrlock();
                LastLogTimes::iterator times = mTimes.begin();
                while (times != mTimes.end()) {
                    LogTimeEntry* entry = times->get();
                    // Killer punch
                    if (entry->isWatching(id)) {
                        android::prdebug(
                            "Kicking blocked reader, pid %d, from LogBuffer::clear()\n",
                            entry->mClient->getPid());
                        entry->release_Locked();
                    }
                    times++;
                }
                LogTimeEntry::unlock();
            }
        }
        wrlock();
        busy = prune(id, ULONG_MAX, uid);
        unlock();
        if (!busy || !--retry) {
            break;
        }
        sleep(1);  // Let reader(s) catch up after notification
    }
    return busy;
}

// get the used space associated with "id".
unsigned long LogBuffer::getSizeUsed(log_id_t id) {
    rdlock();
    size_t retval = stats.sizes(id);
    unlock();
    return retval;
}

// set the total space allocated to "id"
int LogBuffer::setSize(log_id_t id, unsigned long size) {
    // Reasonable limits ...
    if (!__android_logger_valid_buffer_size(size)) {
        return -1;
    }
    wrlock();
    log_buffer_size(id) = size;
    unlock();
    return 0;
}

// get the total space allocated to "id"
unsigned long LogBuffer::getSize(log_id_t id) {
    rdlock();
    size_t retval = log_buffer_size(id);
    unlock();
    return retval;
}

uint64_t LogBuffer::flushTo(SocketClient* reader, uint64_t start, pid_t* lastTid, bool privileged,
                            bool security,
                            int (*filter)(const LogBufferElement* element, void* arg), void* arg) {
    LogBufferElementCollection::iterator it;
    uid_t uid = reader->getUid();

    rdlock();

    if (start <= 1) {
        // client wants to start from the beginning
        it = mLogElements.begin();
    } else {
        // Client wants to start from some specified time. Chances are
        // we are better off starting from the end of the time sorted list.
        for (it = mLogElements.end(); it != mLogElements.begin();
             /* do nothing */) {
            --it;
            LogBufferElement* element = *it;
            if (element->getSequence() <= start) {
                it++;
                break;
            }
        }
    }

    uint64_t curr = start;

    for (; it != mLogElements.end(); ++it) {
        LogBufferElement* element = *it;

        if (!privileged && (element->getUid() != uid)) {
            continue;
        }

        if (!security && (element->getLogId() == LOG_ID_SECURITY)) {
            continue;
        }

        // NB: calling out to another object with rdlock() held (safe)
        if (filter) {
            int ret = (*filter)(element, arg);
            if (ret == false) {
                continue;
            }
            if (ret != true) {
                break;
            }
        }
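        // (Editor's note, derived from the checks above: a filter return of
        // 0 skips the element, 1 emits it, and any other value stops the
        // flush.)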

        bool sameTid = false;
        if (lastTid) {
            sameTid = lastTid[element->getLogId()] == element->getTid();
            // Dropped (chatty) immediately following a valid log from the
            // same source in the same log buffer indicates we have a
            // multiple identical squash. chatty that differs in source
            // is due to spam filter. chatty to chatty of different
            // source is also due to spam filter.
            lastTid[element->getLogId()] =
                (element->getDropped() && !sameTid) ? 0 : element->getTid();
        }

        unlock();

        // range locking in LastLogTimes looks after us
        curr = element->flushTo(reader, this, sameTid);

        if (curr == element->FLUSH_ERROR) {
            return curr;
        }

        rdlock();
    }
    unlock();

    return curr;
}

std::string LogBuffer::formatStatistics(uid_t uid, pid_t pid,
                                        unsigned int logMask) {
    wrlock();

    std::string ret = stats.format(uid, pid, logMask);

    unlock();

    return ret;
}