storaged: Cap io_history when loading stats from disk.

Similar to add_records_locked, load_uid_io_proto should respect the
maximum number of records.

Bug: 111578975
Test: storaged_test.load_uid_io_proto
Change-Id: Ic3c5095cdd09d2184f436813b5ab50d3ee0007b2
Merged-In: Ic3c5095cdd09d2184f436813b5ab50d3ee0007b2
David Anderson 2018-07-26 18:01:03 -07:00
parent 0026a14ff2
commit dec6a881ba
3 changed files with 49 additions and 16 deletions
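As background for the diffs below, here is a minimal, self-contained sketch of the capping strategy this change applies both when appending new records and when loading them from disk. The type and constant names (RecordBucket, History, kMaxRecords) are simplified stand-ins invented for this example, not storaged's real types; only the evict-oldest-timestamp logic mirrors maybe_shrink_history_for_items in the diff below.

    // capping_sketch.cpp - standalone illustration of the eviction strategy.
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <vector>

    // Simplified stand-in for storaged's per-timestamp record bucket.
    struct RecordBucket {
        std::vector<int> entries;
    };

    using History = std::map<uint64_t, RecordBucket>;

    constexpr size_t kMaxRecords = 1000 * 48;  // mirrors MAX_UID_RECORDS_SIZE

    // Total number of entries across all timestamps.
    size_t history_size(const History& history) {
        size_t size = 0;
        for (const auto& bucket : history) size += bucket.second.entries.size();
        return size;
    }

    // Make room for |nitems| more entries by evicting the oldest timestamps
    // (std::map keeps keys sorted, so begin() is always the oldest bucket).
    void maybe_shrink_history_for_items(History* history, size_t nitems) {
        std::ptrdiff_t overflow =
            static_cast<std::ptrdiff_t>(history_size(*history) + nitems) -
            static_cast<std::ptrdiff_t>(kMaxRecords);
        while (overflow > 0 && !history->empty()) {
            auto oldest = history->begin();
            overflow -= static_cast<std::ptrdiff_t>(oldest->second.entries.size());
            history->erase(oldest);
        }
    }

    int main() {
        History history;

        // Overfill the history across many timestamps, then shrink back under the cap.
        for (uint64_t ts = 0; ts < kMaxRecords * 2; ts++) {
            history[ts].entries.push_back(1);  // one entry per timestamp
        }
        maybe_shrink_history_for_items(&history, /*nitems=*/0);
        std::cout << "after shrink: " << history_size(history)
                  << " entries (cap " << kMaxRecords << ")\n";

        // If a single bucket already exceeded the cap on its own, shrinking would
        // erase it entirely; the load path in this change guards against that with
        // an io_history_.size() > 1 check, so a lone oversized record is preserved.
        return 0;
    }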


@@ -105,6 +105,10 @@ private:
     // writes io_history to protobuf
     void update_uid_io_proto(unordered_map<int, StoragedProto>* protos);
 
+    // Ensure that io_history_ can append |n| items without exceeding
+    // MAX_UID_RECORDS_SIZE in size.
+    void maybe_shrink_history_for_items(size_t nitems);
+
   public:
     uid_monitor();
     // called by storaged main thread
@@ -124,6 +128,8 @@ public:
     void clear_user_history(userid_t user_id);
 
     map<uint64_t, uid_records>& io_history() { return io_history_; }
+
+    static constexpr int MAX_UID_RECORDS_SIZE = 1000 * 48;  // 1000 uids in 48 hours
 };
 
 #endif /* _STORAGED_UID_MONITOR_H_ */


@@ -201,8 +201,6 @@ std::unordered_map<uint32_t, uid_info> uid_monitor::get_uid_io_stats_locked()
 namespace {
 
-const int MAX_UID_RECORDS_SIZE = 1000 * 48;  // 1000 uids in 48 hours
-
 inline size_t history_size(
     const std::map<uint64_t, struct uid_records>& history)
 {
@@ -246,15 +244,18 @@ void uid_monitor::add_records_locked(uint64_t curr_ts)
         return;
 
     // make some room for new records
-    ssize_t overflow = history_size(io_history_) +
-        new_records.entries.size() - MAX_UID_RECORDS_SIZE;
+    maybe_shrink_history_for_items(new_records.entries.size());
+
+    io_history_[curr_ts] = new_records;
+}
+
+void uid_monitor::maybe_shrink_history_for_items(size_t nitems) {
+    ssize_t overflow = history_size(io_history_) + nitems - MAX_UID_RECORDS_SIZE;
     while (overflow > 0 && io_history_.size() > 0) {
         auto del_it = io_history_.begin();
         overflow -= del_it->second.entries.size();
         io_history_.erase(io_history_.begin());
     }
-
-    io_history_[curr_ts] = new_records;
 }
 
 std::map<uint64_t, struct uid_records> uid_monitor::dump(
@@ -508,6 +509,12 @@ void uid_monitor::load_uid_io_proto(userid_t user_id, const UidIOUsage& uid_io_p
             }
             recs->entries.push_back(record);
         }
     }
+
+    // We already added items, so this will just cull down to the maximum
+    // length. We do not remove anything if there is only one entry.
+    if (io_history_.size() > 1) {
+        maybe_shrink_history_for_items(0);
+    }
 }


@@ -616,22 +616,24 @@ TEST(storaged_test, uid_monitor) {
     uidm.clear_user_history(0);
 
-    EXPECT_EQ(uidm.io_history_.size(), 2UL);
-    EXPECT_EQ(uidm.io_history_.count(200), 1UL);
-    EXPECT_EQ(uidm.io_history_.count(300), 1UL);
+    EXPECT_EQ(io_history.size(), 2UL);
+    EXPECT_EQ(io_history.count(200), 1UL);
+    EXPECT_EQ(io_history.count(300), 1UL);
 
-    EXPECT_EQ(uidm.io_history_[200].entries.size(), 1UL);
-    EXPECT_EQ(uidm.io_history_[300].entries.size(), 1UL);
+    EXPECT_EQ(io_history[200].entries.size(), 1UL);
+    EXPECT_EQ(io_history[300].entries.size(), 1UL);
 
     uidm.clear_user_history(1);
 
-    EXPECT_EQ(uidm.io_history_.size(), 0UL);
+    EXPECT_EQ(io_history.size(), 0UL);
 }
 
 TEST(storaged_test, load_uid_io_proto) {
     uid_monitor uidm;
+    auto& io_history = uidm.io_history();
 
-    uidm.io_history_[200] = {
+    static const uint64_t kProtoTime = 200;
+    io_history[kProtoTime] = {
         .start_ts = 100,
         .entries = {
             { "app1", {
@@ -657,10 +659,28 @@ TEST(storaged_test, load_uid_io_proto) {
     ASSERT_EQ(protos.size(), size_t(1));
 
     // Loading the same proto many times should not add duplicate entries.
-    const UidIOUsage& user_0 = protos[0].uid_io_usage();
+    UidIOUsage user_0 = protos[0].uid_io_usage();
     for (size_t i = 0; i < 10000; i++) {
         uidm.load_uid_io_proto(0, user_0);
     }
-    ASSERT_EQ(uidm.io_history_.size(), size_t(1));
-    ASSERT_EQ(uidm.io_history_[200].entries.size(), size_t(3));
+    ASSERT_EQ(io_history.size(), size_t(1));
+    ASSERT_EQ(io_history[kProtoTime].entries.size(), size_t(3));
+
+    // Create duplicate entries until we go over the limit.
+    auto record = io_history[kProtoTime];
+    io_history.clear();
+    for (size_t i = 0; i < uid_monitor::MAX_UID_RECORDS_SIZE * 2; i++) {
+        if (i == kProtoTime) {
+            continue;
+        }
+        io_history[i] = record;
+    }
+    ASSERT_GT(io_history.size(), size_t(uid_monitor::MAX_UID_RECORDS_SIZE));
+
+    // After loading, the history should be truncated.
+    for (auto& item : *user_0.mutable_uid_io_items()) {
+        item.set_end_ts(io_history.size());
+    }
+    uidm.load_uid_io_proto(0, user_0);
+    ASSERT_LE(io_history.size(), size_t(uid_monitor::MAX_UID_RECORDS_SIZE));
 }