storaged: split proto file into multiple CE areas

Use user_id (derived from the app uid) to determine the file location:
/data/misc_ce/<user_id>/storaged/storaged.proto

Vold notifies storaged when a user's CE area becomes available.
Storaged then restores data from the proto file in that area and
merges it into the in-memory IO history.

Vold also notifies storaged when a user's CE area is being deleted.
Storaged then clears its internal history for that user and deletes
the proto file.

IO perf history is stored in the user 0 (system) CE area since it is
not tied to a particular user.

Test: dumpsys storaged before/after multiple users' unlock
Bug: 63740245
Change-Id: I39f923f6b09e9f2a29e9286ce02b3b3bcbfb9f94
Author: Jin Qian
Date:   2017-10-18 17:52:14 -07:00
Parent: 446ab4a378
Commit: 6df3bc6301
14 changed files with 462 additions and 190 deletions
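For orientation, a minimal sketch of the flow the commit message describes,
using the method names introduced in the diff below (the vold and binder
plumbing is elided; user 10 is a hypothetical secondary user):

    // vold unlocks user 10's CE storage -> StoragedService::onUserStarted(10)
    storaged_sp->add_user_ce(10);     // load /data/misc_ce/10/storaged/storaged.proto,
                                      // verify its CRC, merge it into the IO history

    // vold destroys user 10's CE storage -> StoragedService::onUserStopped(10)
    storaged_sp->remove_user_ce(10);  // drop in-memory state, unlink the proto file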

storaged.h

@@ -27,6 +27,7 @@
#include <vector>
#include <batteryservice/IBatteryPropertiesListener.h>
#include <utils/Mutex.h>
#include <android/hardware/health/2.0/IHealth.h>
@@ -83,17 +84,18 @@ class storaged_t : public android::hardware::health::V2_0::IHealthInfoCallback,
sp<android::hardware::health::V2_0::IHealth> health;
unique_ptr<storage_info_t> storage_info;
static const uint32_t crc_init;
static const string proto_file;
storaged_proto::StoragedProto proto;
enum stat {
NOT_AVAILABLE,
AVAILABLE,
LOADED,
};
stat proto_stat;
unordered_map<int, storaged_proto::StoragedProto> protos;
Mutex proto_mutex;
void load_proto_locked(userid_t user_id);
void prepare_proto(StoragedProto* proto, userid_t user_id);
void flush_proto_locked(userid_t user_id);
void flush_proto_user_system_locked(StoragedProto* proto);
string proto_path(userid_t user_id) {
return string("/data/misc_ce/") + to_string(user_id) +
"/storaged/storaged.proto";
}
public:
storaged_t(void);
~storaged_t() {}
void event(void);
void event_checked(void);
void pause(void) {
@@ -114,8 +116,7 @@ public:
map<uint64_t, struct uid_records> get_uid_records(
double hours, uint64_t threshold, bool force_report) {
return mUidm.dump(hours, threshold, force_report,
proto.mutable_uid_io_usage());
return mUidm.dump(hours, threshold, force_report, &protos);
}
void update_uid_io_interval(int interval) {
@@ -124,15 +125,8 @@ public:
}
}
void set_proto_stat_available(bool available) {
if (available) {
if (proto_stat != LOADED) {
proto_stat = AVAILABLE;
}
} else {
proto_stat = NOT_AVAILABLE;
}
};
void add_user_ce(userid_t user_id);
void remove_user_ce(userid_t user_id);
void init_health_service();
virtual ::android::hardware::Return<void> healthInfoChanged(
@@ -141,8 +135,7 @@ public:
void report_storage_info();
void load_proto();
void flush_proto();
void flush_protos();
};
// Eventlog tag

storaged_info.h

@@ -21,6 +21,8 @@
#include <chrono>
#include <utils/Mutex.h>
#include "storaged.h"
#include "storaged.pb.h"
@@ -28,6 +30,7 @@
friend class test_case_name##_##test_name##_Test
using namespace std;
using namespace android;
using namespace chrono;
using namespace storaged_proto;
@@ -51,13 +54,12 @@ protected:
uint32_t nr_days;
vector<uint32_t> weekly_perf;
uint32_t nr_weeks;
sem_t si_lock;
Mutex si_mutex;
storage_info_t() : eol(0), lifetime_a(0), lifetime_b(0),
userdata_total_kb(0), userdata_free_kb(0), nr_samples(0),
daily_perf(WEEK_TO_DAYS, 0), nr_days(0),
weekly_perf(YEAR_TO_WEEKS, 0), nr_weeks(0) {
sem_init(&si_lock, 0, 1);
day_start_tp = system_clock::now();
day_start_tp -= chrono::seconds(duration_cast<chrono::seconds>(
day_start_tp.time_since_epoch()).count() % DAY_TO_SEC);
@@ -66,7 +68,7 @@ protected:
storage_info_t* s_info;
public:
static storage_info_t* get_storage_info();
virtual ~storage_info_t() { sem_destroy(&si_lock); }
virtual ~storage_info_t() {};
virtual void report() {};
void load_perf_history_proto(const IOPerfHistory& perf_history);
void refresh(IOPerfHistory* perf_history);

storaged_service.h

@@ -29,6 +29,9 @@ using namespace android::os;
using namespace android::os::storaged;
class StoragedService : public BinderService<StoragedService>, public BnStoraged {
private:
void dumpUidRecordsDebug(int fd, const vector<struct uid_record>& entries);
void dumpUidRecords(int fd, const vector<struct uid_record>& entries);
public:
static status_t start();
static char const* getServiceName() { return "storaged"; }

storaged_uid_monitor.h

@@ -23,92 +23,101 @@
#include <unordered_map>
#include <vector>
#include <cutils/multiuser.h>
#include <utils/Mutex.h>
#include "storaged.pb.h"
#include "uid_info.h"
#define FRIEND_TEST(test_case_name, test_name) \
friend class test_case_name##_##test_name##_Test
using namespace std;
using namespace storaged_proto;
using namespace android;
using namespace android::os::storaged;
class uid_info : public UidInfo {
public:
bool parse_uid_io_stats(std::string&& s);
bool parse_uid_io_stats(string&& s);
};
struct io_usage {
class io_usage {
public:
io_usage() : bytes{{{0}}} {};
uint64_t bytes[IO_TYPES][UID_STATS][CHARGER_STATS];
bool is_zero() const;
io_usage& operator+= (const io_usage& stats) {
for (int i = 0; i < IO_TYPES; i++) {
for (int j = 0; j < UID_STATS; j++) {
for (int k = 0; k < CHARGER_STATS; k++) {
bytes[i][j][k] += stats.bytes[i][j][k];
}
}
}
return *this;
}
};
struct uid_io_usage {
struct io_usage uid_ios;
userid_t user_id;
io_usage uid_ios;
// mapped from task comm to task io usage
std::map<std::string, struct io_usage> task_ios;
map<string, io_usage> task_ios;
};
struct uid_record {
std::string name;
string name;
struct uid_io_usage ios;
};
struct uid_records {
uint64_t start_ts;
std::vector<struct uid_record> entries;
};
class lock_t {
sem_t* mSem;
public:
lock_t(sem_t* sem) {
mSem = sem;
sem_wait(mSem);
}
~lock_t() {
sem_post(mSem);
}
vector<struct uid_record> entries;
};
class uid_monitor {
private:
FRIEND_TEST(storaged_test, uid_monitor);
// last dump from /proc/uid_io/stats, uid -> uid_info
std::unordered_map<uint32_t, uid_info> last_uid_io_stats;
unordered_map<uint32_t, uid_info> last_uid_io_stats;
// current io usage for next report, app name -> uid_io_usage
std::unordered_map<std::string, struct uid_io_usage> curr_io_stats;
unordered_map<string, struct uid_io_usage> curr_io_stats;
// io usage records, end timestamp -> {start timestamp, vector of records}
std::map<uint64_t, struct uid_records> io_history;
map<uint64_t, struct uid_records> io_history;
// charger ON/OFF
charger_stat_t charger_stat;
// protects curr_io_stats, last_uid_io_stats, records and charger_stat
sem_t um_lock;
Mutex uidm_mutex;
// start time for IO records
uint64_t start_ts;
// true if UID_IO_STATS_PATH is accessible
const bool enable;
// reads from /proc/uid_io/stats
std::unordered_map<uint32_t, uid_info> get_uid_io_stats_locked();
unordered_map<uint32_t, uid_info> get_uid_io_stats_locked();
// flushes curr_io_stats to records
void add_records_locked(uint64_t curr_ts);
// updates curr_io_stats and set last_uid_io_stats
void update_curr_io_stats_locked();
// writes io_history to protobuf
void update_uid_io_proto(UidIOUsage* proto);
void update_uid_io_proto(unordered_map<int, StoragedProto>* protos);
public:
uid_monitor();
~uid_monitor();
// called by storaged main thread
void init(charger_stat_t stat);
// called by storaged -u
std::unordered_map<uint32_t, uid_info> get_uid_io_stats();
unordered_map<uint32_t, uid_info> get_uid_io_stats();
// called by dumpsys
std::map<uint64_t, struct uid_records> dump(
map<uint64_t, struct uid_records> dump(
double hours, uint64_t threshold, bool force_report,
UidIOUsage* uid_io_proto);
unordered_map<int, StoragedProto>* protos);
// called by battery properties listener
void set_charger_state(charger_stat_t stat);
// called by storaged periodic_chore or dump with force_report
bool enabled() { return enable; };
void report(UidIOUsage* proto);
void report(unordered_map<int, StoragedProto>* protos);
// restores io_history from protobuf
void load_uid_io_proto(const UidIOUsage& proto);
};
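A sketch of how the new per-user signatures above are consumed (a
hypothetical caller; the real call sites are in storaged.cpp below):

    unordered_map<int, StoragedProto> protos;  // one proto per unlocked user
    uid_monitor uidm;
    uidm.init(CHARGER_ON);
    uidm.report(&protos);  // flush current stats into the per-user protos
    map<uint64_t, struct uid_records> recs =
        uidm.dump(/*hours=*/24, /*threshold=*/0, /*force_report=*/false, &protos);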

storaged_utils.h

@@ -33,6 +33,7 @@ void get_inc_disk_stats(const struct disk_stats* prev, const struct disk_stats*
void add_disk_stats(struct disk_stats* src, struct disk_stats* dst);
// UID I/O
map<string, io_usage> merge_io_usage(const vector<uid_record>& entries);
void sort_running_uids_info(std::vector<UidInfo> &uids);
// Logging

main.cpp

@@ -51,7 +51,6 @@ sp<storaged_t> storaged_sp;
void* storaged_main(void* /* unused */) {
storaged_sp = new storaged_t();
storaged_sp->load_proto();
storaged_sp->init_health_service();
storaged_sp->report_storage_info();

storaged.cpp

@@ -16,6 +16,7 @@
#define LOG_TAG "storaged"
#include <dirent.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
@@ -28,6 +29,7 @@
#include <string>
#include <android/hidl/manager/1.0/IServiceManager.h>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <batteryservice/BatteryServiceConstants.h>
#include <cutils/properties.h>
@@ -45,13 +47,18 @@ using namespace storaged_proto;
namespace {
const uint32_t benchmark_unit_size = 16 * 1024; // 16KB
/*
* The system user is the initial user that is implicitly created on first boot
* and hosts most of the system services. Keep this in sync with
* frameworks/base/core/java/android/os/UserManager.java
*/
constexpr int USER_SYSTEM = 0;
}
constexpr uint32_t benchmark_unit_size = 16 * 1024; // 16KB
} // namespace
const uint32_t storaged_t::crc_init = 0x5108A4ED; /* STORAGED */
const std::string storaged_t::proto_file =
"/data/misc_ce/0/storaged/storaged.proto";
using android::hardware::health::V1_0::BatteryStatus;
using android::hardware::health::V1_0::toString;
@@ -135,7 +142,7 @@ void storaged_t::report_storage_info() {
}
/* storaged_t */
storaged_t::storaged_t(void) : proto_stat(NOT_AVAILABLE) {
storaged_t::storaged_t(void) {
mConfig.periodic_chores_interval_unit =
property_get_int32("ro.storaged.event.interval",
DEFAULT_PERIODIC_CHORES_INTERVAL_UNIT);
@@ -161,68 +168,81 @@ storaged_t::storaged_t(void) : proto_stat(NOT_AVAILABLE) {
mTimer = 0;
}
void storaged_t::load_proto() {
std::ifstream in(proto_file,
std::ofstream::in | std::ofstream::binary);
void storaged_t::add_user_ce(userid_t user_id) {
Mutex::Autolock _l(proto_mutex);
protos.insert({user_id, {}});
load_proto_locked(user_id);
protos[user_id].set_loaded(1);
}
if (!in.good()) {
PLOG_TO(SYSTEM, INFO) << "Open " << proto_file << " failed";
proto_stat = NOT_AVAILABLE;
return;
}
void storaged_t::remove_user_ce(userid_t user_id) {
Mutex::Autolock _l(proto_mutex);
protos.erase(user_id);
RemoveFileIfExists(proto_path(user_id), nullptr);
}
proto_stat = AVAILABLE;
void storaged_t::load_proto_locked(userid_t user_id) {
string proto_file = proto_path(user_id);
ifstream in(proto_file, ofstream::in | ofstream::binary);
if (!in.good()) return;
stringstream ss;
ss << in.rdbuf();
proto.Clear();
proto.ParseFromString(ss.str());
StoragedProto* proto = &protos[user_id];
proto->Clear();
proto->ParseFromString(ss.str());
uint32_t crc = proto.crc();
proto.set_crc(crc_init);
std::string proto_str = proto.SerializeAsString();
uint32_t crc = proto->crc();
proto->set_crc(crc_init);
string proto_str = proto->SerializeAsString();
uint32_t computed_crc = crc32(crc_init,
reinterpret_cast<const Bytef*>(proto_str.c_str()),
proto_str.size());
if (crc != computed_crc) {
LOG_TO(SYSTEM, WARNING) << "CRC mismatch in " << proto_file;
proto.Clear();
proto->Clear();
return;
}
proto_stat = LOADED;
mUidm.load_uid_io_proto(proto->uid_io_usage());
storage_info->load_perf_history_proto(proto.perf_history());
mUidm.load_uid_io_proto(proto.uid_io_usage());
if (user_id == USER_SYSTEM) {
storage_info->load_perf_history_proto(proto->perf_history());
}
}
void storaged_t::flush_proto() {
if (proto_stat != LOADED) return;
void storaged_t::prepare_proto(StoragedProto* proto, userid_t user_id) {
proto->set_version(2);
proto->set_crc(crc_init);
proto.set_version(1);
proto.set_crc(crc_init);
while (proto.ByteSize() < 128 * 1024) {
proto.add_padding(0xFEEDBABE);
if (user_id == USER_SYSTEM) {
while (proto->ByteSize() < 128 * 1024) {
proto->add_padding(0xFEEDBABE);
}
}
std::string proto_str = proto.SerializeAsString();
proto.set_crc(crc32(crc_init,
string proto_str = proto->SerializeAsString();
proto->set_crc(crc32(crc_init,
reinterpret_cast<const Bytef*>(proto_str.c_str()),
proto_str.size()));
proto_str = proto.SerializeAsString();
}
void storaged_t::flush_proto_user_system_locked(StoragedProto* proto) {
string proto_str = proto->SerializeAsString();
const char* data = proto_str.data();
uint32_t size = proto_str.size();
ssize_t ret;
time_point<steady_clock> start, end;
std::string tmp_file = proto_file + "_tmp";
string proto_file = proto_path(USER_SYSTEM);
string tmp_file = proto_file + "_tmp";
unique_fd fd(TEMP_FAILURE_RETRY(open(tmp_file.c_str(),
O_DIRECT | O_SYNC | O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC,
S_IRUSR | S_IWUSR)));
O_DIRECT | O_SYNC | O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC,
S_IRUSR | S_IWUSR)));
if (fd == -1) {
PLOG_TO(SYSTEM, ERROR) << "Failed to open tmp file: " << tmp_file;
proto_stat = NOT_AVAILABLE;
return;
}
@@ -258,11 +278,40 @@ void storaged_t::flush_proto() {
rename(tmp_file.c_str(), proto_file.c_str());
}
void storaged_t::event(void) {
if (proto_stat == AVAILABLE) {
load_proto();
void storaged_t::flush_proto_locked(userid_t user_id) {
StoragedProto* proto = &protos[user_id];
prepare_proto(proto, user_id);
if (user_id == USER_SYSTEM) {
flush_proto_user_system_locked(proto);
return;
}
string proto_file = proto_path(user_id);
string tmp_file = proto_file + "_tmp";
if (!WriteStringToFile(proto->SerializeAsString(), tmp_file,
S_IRUSR | S_IWUSR)) {
return;
}
/* Atomically replace existing proto file to reduce chance of data loss. */
rename(tmp_file.c_str(), proto_file.c_str());
}
void storaged_t::flush_protos() {
Mutex::Autolock _l(proto_mutex);
for (const auto& it : protos) {
/*
* Don't flush proto if we haven't loaded it from file and combined
* with data in memory.
*/
if (it.second.loaded() != 1) {
continue;
}
flush_proto_locked(it.first);
}
}
void storaged_t::event(void) {
if (mDsm.enabled()) {
mDsm.update();
if (!(mTimer % mConfig.periodic_chores_interval_disk_stats_publish)) {
@@ -271,12 +320,17 @@ void storaged_t::event(void) {
}
if (!(mTimer % mConfig.periodic_chores_interval_uid_io)) {
mUidm.report(proto.mutable_uid_io_usage());
Mutex::Autolock _l(proto_mutex);
mUidm.report(&protos);
}
if (storage_info) {
Mutex::Autolock _l(proto_mutex);
storage_info->refresh(protos[USER_SYSTEM].mutable_perf_history());
}
storage_info->refresh(proto.mutable_perf_history());
if (!(mTimer % mConfig.periodic_chores_interval_flush_proto)) {
flush_proto();
flush_protos();
}
mTimer += mConfig.periodic_chores_interval_unit;
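The CRC scheme used by load_proto_locked() and prepare_proto() above can be
restated as a standalone sketch (assuming zlib's crc32() and the crc_init
seed defined in storaged.h; function names here are illustrative):

    #include <zlib.h>
    #include <string>
    #include "storaged.pb.h"

    // The stored crc covers the serialized proto with its crc field set to
    // the fixed seed, so verification recomputes it exactly the same way.
    static const uint32_t kCrcInit = 0x5108A4ED;  // crc_init from storaged.h

    uint32_t compute_crc(storaged_proto::StoragedProto proto /* by value */) {
        proto.set_crc(kCrcInit);
        std::string s = proto.SerializeAsString();
        return crc32(kCrcInit,
                     reinterpret_cast<const Bytef*>(s.data()), s.size());
    }

    bool crc_matches(const storaged_proto::StoragedProto& proto) {
        return proto.crc() == compute_crc(proto);
    }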

storaged.proto

@@ -22,8 +22,9 @@ message TaskIOUsage {
message UidRecord {
optional string uid_name = 1;
optional IOUsage uid_io = 2;
repeated TaskIOUsage task_io = 3;
optional uint32 user_id = 2;
optional IOUsage uid_io = 3;
repeated TaskIOUsage task_io = 4;
}
message UidIORecords {
@@ -53,7 +54,8 @@ message IOPerfHistory {
message StoragedProto {
optional uint32 crc = 1;
optional uint32 version = 2;
optional UidIOUsage uid_io_usage = 3;
optional IOPerfHistory perf_history = 4;
repeated uint32 padding = 5;
optional uint32 loaded = 3;
optional UidIOUsage uid_io_usage = 4;
optional IOPerfHistory perf_history = 5;
repeated uint32 padding = 6;
}
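Assembled from the hunks above, the full message after this change reads:

    message StoragedProto {
      optional uint32 crc = 1;
      optional uint32 version = 2;
      optional uint32 loaded = 3;
      optional UidIOUsage uid_io_usage = 4;
      optional IOPerfHistory perf_history = 5;
      repeated uint32 padding = 6;
    }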

storaged.rc

@@ -1,7 +1,3 @@
# remove this after vold can create directory for us.
on property:sys.user.0.ce_available=true
mkdir /data/misc_ce/0/storaged
service storaged /system/bin/storaged
class main
priority 10

storaged_info.cpp

@@ -76,7 +76,7 @@ void storage_info_t::load_perf_history_proto(const IOPerfHistory& perf_history)
}
day_start_tp = {};
day_start_tp += seconds(perf_history.day_start_sec());
day_start_tp += chrono::seconds(perf_history.day_start_sec());
nr_samples = perf_history.nr_samples();
for (auto bw : perf_history.recent_perf()) {
@@ -107,11 +107,11 @@ void storage_info_t::refresh(IOPerfHistory* perf_history)
userdata_total_kb = buf.f_bsize * buf.f_blocks >> 10;
userdata_free_kb = buf.f_bfree * buf.f_bsize >> 10;
unique_ptr<lock_t> lock(new lock_t(&si_lock));
Mutex::Autolock _l(si_mutex);
perf_history->Clear();
perf_history->set_day_start_sec(
duration_cast<seconds>(day_start_tp.time_since_epoch()).count());
duration_cast<chrono::seconds>(day_start_tp.time_since_epoch()).count());
for (const uint32_t& bw : recent_perf) {
perf_history->add_recent_perf(bw);
}
@@ -136,10 +136,10 @@ void storage_info_t::publish()
void storage_info_t::update_perf_history(uint32_t bw,
const time_point<system_clock>& tp)
{
unique_ptr<lock_t> lock(new lock_t(&si_lock));
Mutex::Autolock _l(si_mutex);
if (tp > day_start_tp &&
duration_cast<seconds>(tp - day_start_tp).count() < DAY_TO_SEC) {
duration_cast<chrono::seconds>(tp - day_start_tp).count() < DAY_TO_SEC) {
if (nr_samples >= recent_perf.size()) {
recent_perf.push_back(bw);
} else {
@@ -155,7 +155,7 @@ void storage_info_t::update_perf_history(uint32_t bw,
uint32_t daily_avg_bw = accumulate(recent_perf.begin(),
recent_perf.begin() + nr_samples, 0) / nr_samples;
day_start_tp = tp - seconds(duration_cast<seconds>(
day_start_tp = tp - chrono::seconds(duration_cast<chrono::seconds>(
tp.time_since_epoch()).count() % DAY_TO_SEC);
nr_samples = 0;
@@ -182,7 +182,7 @@ void storage_info_t::update_perf_history(uint32_t bw,
vector<int> storage_info_t::get_perf_history()
{
unique_ptr<lock_t> lock(new lock_t(&si_lock));
Mutex::Autolock _l(si_mutex);
vector<int> ret(3 + recent_perf.size() + daily_perf.size() + weekly_perf.size());

storaged_service.cpp

@@ -30,24 +30,68 @@
#include <private/android_filesystem_config.h>
#include <storaged.h>
#include <storaged_utils.h>
#include <storaged_service.h>
using namespace std;
using namespace android::base;
/*
* The system user is the initial user that is implicitly created on first boot
* and hosts most of the system services. Keep this in sync with
* frameworks/base/core/java/android/os/UserManager.java
*/
const int USER_SYSTEM = 0;
extern sp<storaged_t> storaged_sp;
status_t StoragedService::start() {
return BinderService<StoragedService>::publish();
}
void StoragedService::dumpUidRecords(int fd, const vector<uid_record>& entries) {
map<string, io_usage> merged_entries = merge_io_usage(entries);
for (const auto& rec : merged_entries) {
dprintf(fd, "%s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64
" %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
rec.first.c_str(),
rec.second.bytes[READ][FOREGROUND][CHARGER_OFF],
rec.second.bytes[WRITE][FOREGROUND][CHARGER_OFF],
rec.second.bytes[READ][BACKGROUND][CHARGER_OFF],
rec.second.bytes[WRITE][BACKGROUND][CHARGER_OFF],
rec.second.bytes[READ][FOREGROUND][CHARGER_ON],
rec.second.bytes[WRITE][FOREGROUND][CHARGER_ON],
rec.second.bytes[READ][BACKGROUND][CHARGER_ON],
rec.second.bytes[WRITE][BACKGROUND][CHARGER_ON]);
}
}
void StoragedService::dumpUidRecordsDebug(int fd, const vector<uid_record>& entries) {
for (const auto& record : entries) {
const io_usage& uid_usage = record.ios.uid_ios;
dprintf(fd, "%s_%d %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64
" %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
record.name.c_str(), record.ios.user_id,
uid_usage.bytes[READ][FOREGROUND][CHARGER_OFF],
uid_usage.bytes[WRITE][FOREGROUND][CHARGER_OFF],
uid_usage.bytes[READ][BACKGROUND][CHARGER_OFF],
uid_usage.bytes[WRITE][BACKGROUND][CHARGER_OFF],
uid_usage.bytes[READ][FOREGROUND][CHARGER_ON],
uid_usage.bytes[WRITE][FOREGROUND][CHARGER_ON],
uid_usage.bytes[READ][BACKGROUND][CHARGER_ON],
uid_usage.bytes[WRITE][BACKGROUND][CHARGER_ON]);
for (const auto& task_it : record.ios.task_ios) {
const io_usage& task_usage = task_it.second;
const string& comm = task_it.first;
dprintf(fd, "-> %s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64
" %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
comm.c_str(),
task_usage.bytes[READ][FOREGROUND][CHARGER_OFF],
task_usage.bytes[WRITE][FOREGROUND][CHARGER_OFF],
task_usage.bytes[READ][BACKGROUND][CHARGER_OFF],
task_usage.bytes[WRITE][BACKGROUND][CHARGER_OFF],
task_usage.bytes[READ][FOREGROUND][CHARGER_ON],
task_usage.bytes[WRITE][FOREGROUND][CHARGER_ON],
task_usage.bytes[READ][BACKGROUND][CHARGER_ON],
task_usage.bytes[WRITE][BACKGROUND][CHARGER_ON]);
}
}
}
status_t StoragedService::dump(int fd, const Vector<String16>& args) {
IPCThreadState* self = IPCThreadState::self();
const int pid = self->getCallingPid();
@@ -97,7 +141,7 @@ status_t StoragedService::dump(int fd, const Vector<String16>& args) {
}
uint64_t last_ts = 0;
const map<uint64_t, struct uid_records>& records =
map<uint64_t, struct uid_records> records =
storaged_sp->get_uid_records(hours, threshold, force_report);
for (const auto& it : records) {
if (last_ts != it.second.start_ts) {
@@ -106,36 +150,10 @@ status_t StoragedService::dump(int fd, const Vector<String16>& args) {
dprintf(fd, ",%" PRIu64 "\n", it.first);
last_ts = it.first;
for (const auto& record : it.second.entries) {
const struct io_usage& uid_usage = record.ios.uid_ios;
dprintf(fd, "%s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64
" %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
record.name.c_str(),
uid_usage.bytes[READ][FOREGROUND][CHARGER_OFF],
uid_usage.bytes[WRITE][FOREGROUND][CHARGER_OFF],
uid_usage.bytes[READ][BACKGROUND][CHARGER_OFF],
uid_usage.bytes[WRITE][BACKGROUND][CHARGER_OFF],
uid_usage.bytes[READ][FOREGROUND][CHARGER_ON],
uid_usage.bytes[WRITE][FOREGROUND][CHARGER_ON],
uid_usage.bytes[READ][BACKGROUND][CHARGER_ON],
uid_usage.bytes[WRITE][BACKGROUND][CHARGER_ON]);
if (debug) {
for (const auto& task_it : record.ios.task_ios) {
const struct io_usage& task_usage = task_it.second;
const string& comm = task_it.first;
dprintf(fd, "-> %s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64
" %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
comm.c_str(),
task_usage.bytes[READ][FOREGROUND][CHARGER_OFF],
task_usage.bytes[WRITE][FOREGROUND][CHARGER_OFF],
task_usage.bytes[READ][BACKGROUND][CHARGER_OFF],
task_usage.bytes[WRITE][BACKGROUND][CHARGER_OFF],
task_usage.bytes[READ][FOREGROUND][CHARGER_ON],
task_usage.bytes[WRITE][FOREGROUND][CHARGER_ON],
task_usage.bytes[READ][BACKGROUND][CHARGER_ON],
task_usage.bytes[WRITE][BACKGROUND][CHARGER_ON]);
}
}
if (!debug) {
dumpUidRecords(fd, it.second.entries);
} else {
dumpUidRecordsDebug(fd, it.second.entries);
}
}
@@ -147,16 +165,12 @@ status_t StoragedService::dump(int fd, const Vector<String16>& args) {
}
binder::Status StoragedService::onUserStarted(int32_t userId) {
if (userId == USER_SYSTEM) {
storaged_sp->set_proto_stat_available(true);
}
storaged_sp->add_user_ce(userId);
return binder::Status::ok();
}
binder::Status StoragedService::onUserStopped(int32_t userId) {
if (userId == USER_SYSTEM) {
storaged_sp->set_proto_stat_available(false);
}
storaged_sp->remove_user_ce(userId);
return binder::Status::ok();
}

storaged_uid_monitor.cpp

@@ -50,7 +50,7 @@ const char* UID_IO_STATS_PATH = "/proc/uid_io/stats";
std::unordered_map<uint32_t, uid_info> uid_monitor::get_uid_io_stats()
{
std::unique_ptr<lock_t> lock(new lock_t(&um_lock));
Mutex::Autolock _l(uidm_mutex);
return get_uid_io_stats_locked();
};
@@ -227,6 +227,7 @@ void uid_monitor::add_records_locked(uint64_t curr_ts)
struct uid_record record = {};
record.name = p.first;
if (!p.second.uid_ios.is_zero()) {
record.ios.user_id = p.second.user_id;
record.ios.uid_ios = p.second.uid_ios;
for (const auto& p_task : p.second.task_ios) {
if (!p_task.second.is_zero())
@@ -256,13 +257,14 @@ void uid_monitor::add_records_locked(uint64_t curr_ts)
}
std::map<uint64_t, struct uid_records> uid_monitor::dump(
double hours, uint64_t threshold, bool force_report, UidIOUsage* uid_io_proto)
double hours, uint64_t threshold, bool force_report,
unordered_map<int, StoragedProto>* protos)
{
if (force_report) {
report(uid_io_proto);
report(protos);
}
std::unique_ptr<lock_t> lock(new lock_t(&um_lock));
Mutex::Autolock _l(uidm_mutex);
std::map<uint64_t, struct uid_records> dump_records;
uint64_t first_ts = 0;
@@ -310,12 +312,13 @@ void uid_monitor::update_curr_io_stats_locked()
for (const auto& it : uid_io_stats) {
const uid_info& uid = it.second;
if (curr_io_stats.find(uid.name) == curr_io_stats.end()) {
curr_io_stats[uid.name] = {};
curr_io_stats[uid.name] = {};
}
struct uid_io_usage& usage = curr_io_stats[uid.name];
usage.user_id = multiuser_get_user_id(uid.uid);
int64_t fg_rd_delta = uid.io[FOREGROUND].read_bytes -
last_uid_io_stats[uid.uid].io[FOREGROUND].read_bytes;
int64_t bg_rd_delta = uid.io[BACKGROUND].read_bytes -
@@ -347,7 +350,7 @@ void uid_monitor::update_curr_io_stats_locked()
int64_t task_bg_wr_delta = task.io[BACKGROUND].write_bytes -
last_uid_io_stats[uid.uid].tasks[pid].io[BACKGROUND].write_bytes;
struct io_usage& task_usage = usage.task_ios[comm];
io_usage& task_usage = usage.task_ios[comm];
task_usage.bytes[READ][FOREGROUND][charger_stat] +=
(task_fg_rd_delta < 0) ? 0 : task_fg_rd_delta;
task_usage.bytes[READ][BACKGROUND][charger_stat] +=
@@ -362,21 +365,21 @@
last_uid_io_stats = uid_io_stats;
}
void uid_monitor::report(UidIOUsage* proto)
void uid_monitor::report(unordered_map<int, StoragedProto>* protos)
{
if (!enabled()) return;
std::unique_ptr<lock_t> lock(new lock_t(&um_lock));
Mutex::Autolock _l(uidm_mutex);
update_curr_io_stats_locked();
add_records_locked(time(NULL));
update_uid_io_proto(proto);
update_uid_io_proto(protos);
}
namespace {
void set_io_usage_proto(IOUsage* usage_proto, const struct io_usage& usage)
void set_io_usage_proto(IOUsage* usage_proto, const io_usage& usage)
{
usage_proto->set_rd_fg_chg_on(usage.bytes[READ][FOREGROUND][CHARGER_ON]);
usage_proto->set_rd_fg_chg_off(usage.bytes[READ][FOREGROUND][CHARGER_OFF]);
@@ -388,7 +391,7 @@ void set_io_usage_proto(IOUsage* usage_proto, const struct io_usage& usage)
usage_proto->set_wr_bg_chg_off(usage.bytes[WRITE][BACKGROUND][CHARGER_OFF]);
}
void get_io_usage_proto(struct io_usage* usage, const IOUsage& io_proto)
void get_io_usage_proto(io_usage* usage, const IOUsage& io_proto)
{
usage->bytes[READ][FOREGROUND][CHARGER_ON] = io_proto.rd_fg_chg_on();
usage->bytes[READ][FOREGROUND][CHARGER_OFF] = io_proto.rd_fg_chg_off();
@@ -402,31 +405,41 @@ void get_io_usage_proto(struct io_usage* usage, const IOUsage& io_proto)
} // namespace
void uid_monitor::update_uid_io_proto(UidIOUsage* uid_io_proto)
void uid_monitor::update_uid_io_proto(unordered_map<int, StoragedProto>* protos)
{
uid_io_proto->Clear();
for (auto it : *protos) {
it.second.mutable_uid_io_usage()->Clear();
}
for (const auto& item : io_history) {
const uint64_t& end_ts = item.first;
const struct uid_records& recs = item.second;
UidIOItem* item_proto = uid_io_proto->add_uid_io_items();
item_proto->set_end_ts(end_ts);
UidIORecords* recs_proto = item_proto->mutable_records();
recs_proto->set_start_ts(recs.start_ts);
unordered_map<userid_t, UidIOItem*> user_items;
for (const auto& entry : recs.entries) {
userid_t user_id = entry.ios.user_id;
UidIOItem* item_proto = user_items[user_id];
if (item_proto == nullptr) {
item_proto = (*protos)[user_id].mutable_uid_io_usage()
->add_uid_io_items();
user_items[user_id] = item_proto;
}
item_proto->set_end_ts(end_ts);
UidIORecords* recs_proto = item_proto->mutable_records();
recs_proto->set_start_ts(recs.start_ts);
UidRecord* rec_proto = recs_proto->add_entries();
rec_proto->set_uid_name(entry.name);
rec_proto->set_user_id(user_id);
IOUsage* uid_io_proto = rec_proto->mutable_uid_io();
const struct io_usage& uio_ios = entry.ios.uid_ios;
const io_usage& uio_ios = entry.ios.uid_ios;
set_io_usage_proto(uid_io_proto, uio_ios);
for (const auto& task_io : entry.ios.task_ios) {
const std::string& task_name = task_io.first;
const struct io_usage& task_ios = task_io.second;
const io_usage& task_ios = task_io.second;
TaskIOUsage* task_io_proto = rec_proto->add_task_io();
task_io_proto->set_task_name(task_name);
@@ -448,6 +461,7 @@ void uid_monitor::load_uid_io_proto(const UidIOUsage& uid_io_proto)
for (const auto& rec_proto : records_proto.entries()) {
struct uid_record record;
record.name = rec_proto.uid_name();
record.ios.user_id = rec_proto.user_id();
get_io_usage_proto(&record.ios.uid_ios, rec_proto.uid_io());
for (const auto& task_io_proto : rec_proto.task_io()) {
@@ -462,7 +476,7 @@ void uid_monitor::load_uid_io_proto(const UidIOUsage& uid_io_proto)
void uid_monitor::set_charger_state(charger_stat_t stat)
{
std::unique_ptr<lock_t> lock(new lock_t(&um_lock));
Mutex::Autolock _l(uidm_mutex);
if (charger_stat == stat) {
return;
@@ -481,12 +495,5 @@ void uid_monitor::init(charger_stat_t stat)
}
uid_monitor::uid_monitor()
: enable(!access(UID_IO_STATS_PATH, R_OK))
{
sem_init(&um_lock, 0, 1);
}
uid_monitor::~uid_monitor()
{
sem_destroy(&um_lock);
: enable(!access(UID_IO_STATS_PATH, R_OK)) {
}

storaged_utils.cpp

@@ -121,4 +121,12 @@ void log_console_perf_history(const vector<int>& perf_history) {
std::copy(perf_history.begin() + start, perf_history.end(),
std::ostream_iterator<int>(line, " "));
printf("last 52 weeks : %s\n", line.str().c_str());
}
}
map<string, io_usage> merge_io_usage(const vector<uid_record>& entries) {
map<string, io_usage> merged_entries;
for (const auto& record : entries) {
merged_entries[record.name] += record.ios.uid_ios;
}
return merged_entries;
}
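A short usage sketch (hypothetical caller): dumpUidRecords() in
storaged_service.cpp above uses this helper so the non-debug dumpsys output
stays keyed by package name rather than per-user entry:

    vector<uid_record> entries = /* one interval from io_history */ {};
    map<string, io_usage> merged = merge_io_usage(entries);
    // "app1" under user 0 and user 10 collapses into a single row:
    uint64_t wr_fg_on = merged["app1"].bytes[WRITE][FOREGROUND][CHARGER_ON];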

tests/storaged_test.cpp

@@ -33,6 +33,7 @@
using namespace std;
using namespace chrono;
using namespace storaged_proto;
namespace {
@@ -376,7 +377,7 @@ TEST(storaged_test, storage_info_t) {
for (int i = 0; i < 75; i++) {
tp += hours(5);
stp = {};
stp += duration_cast<seconds>(tp.time_since_epoch());
stp += duration_cast<chrono::seconds>(tp.time_since_epoch());
si.update_perf_history((i + 1) * 5, stp);
}
@@ -406,3 +407,186 @@
EXPECT_EQ(history[i], 0);
}
}
TEST(storaged_test, uid_monitor) {
uid_monitor uidm;
uidm.io_history[200] = {
.start_ts = 100,
.entries = {
{ "app1", {
.user_id = 0,
.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
}
},
{ "app2", {
.user_id = 0,
.uid_ios.bytes[READ][FOREGROUND][CHARGER_OFF] = 1000,
}
},
{ "app1", {
.user_id = 1,
.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
.uid_ios.bytes[READ][FOREGROUND][CHARGER_ON] = 1000,
}
},
},
};
uidm.io_history[300] = {
.start_ts = 200,
.entries = {
{ "app1", {
.user_id = 1,
.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_OFF] = 1000,
}
},
{ "app3", {
.user_id = 0,
.uid_ios.bytes[READ][BACKGROUND][CHARGER_OFF] = 1000,
}
},
},
};
StoragedProto proto_0;
UidIOItem* item = proto_0.mutable_uid_io_usage()->add_uid_io_items();
item->set_end_ts(200);
item->mutable_records()->set_start_ts(100);
UidRecord* rec = item->mutable_records()->add_entries();
rec->set_uid_name("app1");
rec->set_user_id(0);
rec->mutable_uid_io()->set_wr_fg_chg_on(1000);
unordered_map<int, StoragedProto> protos;
protos[0] = proto_0;
uidm.update_uid_io_proto(&protos);
EXPECT_EQ(protos.size(), 2U);
EXPECT_EQ(protos.count(0), 1UL);
EXPECT_EQ(protos.count(1), 1UL);
EXPECT_EQ(protos[0].uid_io_usage().uid_io_items_size(), 2);
const UidIOItem& user_0_item_0 = protos[0].uid_io_usage().uid_io_items(0);
EXPECT_EQ(user_0_item_0.end_ts(), 200UL);
EXPECT_EQ(user_0_item_0.records().start_ts(), 100UL);
EXPECT_EQ(user_0_item_0.records().entries_size(), 2);
EXPECT_EQ(user_0_item_0.records().entries(0).uid_name(), "app1");
EXPECT_EQ(user_0_item_0.records().entries(0).user_id(), 0UL);
EXPECT_EQ(user_0_item_0.records().entries(0).uid_io().wr_fg_chg_on(), 1000UL);
EXPECT_EQ(user_0_item_0.records().entries(1).uid_name(), "app2");
EXPECT_EQ(user_0_item_0.records().entries(1).user_id(), 0UL);
EXPECT_EQ(user_0_item_0.records().entries(1).uid_io().rd_fg_chg_off(), 1000UL);
const UidIOItem& user_0_item_1 = protos[0].uid_io_usage().uid_io_items(1);
EXPECT_EQ(user_0_item_1.end_ts(), 300UL);
EXPECT_EQ(user_0_item_1.records().start_ts(), 200UL);
EXPECT_EQ(user_0_item_1.records().entries_size(), 1);
EXPECT_EQ(user_0_item_1.records().entries(0).uid_name(), "app3");
EXPECT_EQ(user_0_item_1.records().entries(0).user_id(), 0UL);
EXPECT_EQ(user_0_item_1.records().entries(0).uid_io().rd_bg_chg_off(), 1000UL);
EXPECT_EQ(protos[1].uid_io_usage().uid_io_items_size(), 2);
const UidIOItem& user_1_item_0 = protos[1].uid_io_usage().uid_io_items(0);
EXPECT_EQ(user_1_item_0.end_ts(), 200UL);
EXPECT_EQ(user_1_item_0.records().start_ts(), 100UL);
EXPECT_EQ(user_1_item_0.records().entries_size(), 1);
EXPECT_EQ(user_1_item_0.records().entries(0).uid_name(), "app1");
EXPECT_EQ(user_1_item_0.records().entries(0).user_id(), 1UL);
EXPECT_EQ(user_1_item_0.records().entries(0).uid_io().rd_fg_chg_on(), 1000UL);
EXPECT_EQ(user_1_item_0.records().entries(0).uid_io().wr_fg_chg_on(), 1000UL);
const UidIOItem& user_1_item_1 = protos[1].uid_io_usage().uid_io_items(1);
EXPECT_EQ(user_1_item_1.end_ts(), 300UL);
EXPECT_EQ(user_1_item_1.records().start_ts(), 200UL);
EXPECT_EQ(user_1_item_1.records().entries_size(), 1);
EXPECT_EQ(user_1_item_1.records().entries(0).uid_name(), "app1");
EXPECT_EQ(user_1_item_1.records().entries(0).user_id(), 1UL);
EXPECT_EQ(user_1_item_1.records().entries(0).uid_io().wr_fg_chg_off(), 1000UL);
uidm.io_history.clear();
uidm.io_history[300] = {
.start_ts = 200,
.entries = {
{ "app1", {
.user_id = 0,
.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
}
},
},
};
uidm.io_history[400] = {
.start_ts = 300,
.entries = {
{ "app1", {
.user_id = 0,
.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
}
},
},
};
uidm.load_uid_io_proto(protos[0].uid_io_usage());
uidm.load_uid_io_proto(protos[1].uid_io_usage());
EXPECT_EQ(uidm.io_history.size(), 3UL);
EXPECT_EQ(uidm.io_history.count(200), 1UL);
EXPECT_EQ(uidm.io_history.count(300), 1UL);
EXPECT_EQ(uidm.io_history.count(400), 1UL);
EXPECT_EQ(uidm.io_history[200].start_ts, 100UL);
const vector<struct uid_record>& entries_0 = uidm.io_history[200].entries;
EXPECT_EQ(entries_0.size(), 3UL);
EXPECT_EQ(entries_0[0].name, "app1");
EXPECT_EQ(entries_0[0].ios.user_id, 0UL);
EXPECT_EQ(entries_0[0].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
EXPECT_EQ(entries_0[1].name, "app2");
EXPECT_EQ(entries_0[1].ios.user_id, 0UL);
EXPECT_EQ(entries_0[1].ios.uid_ios.bytes[READ][FOREGROUND][CHARGER_OFF], 1000UL);
EXPECT_EQ(entries_0[2].name, "app1");
EXPECT_EQ(entries_0[2].ios.user_id, 1UL);
EXPECT_EQ(entries_0[2].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
EXPECT_EQ(entries_0[2].ios.uid_ios.bytes[READ][FOREGROUND][CHARGER_ON], 1000UL);
EXPECT_EQ(uidm.io_history[300].start_ts, 200UL);
const vector<struct uid_record>& entries_1 = uidm.io_history[300].entries;
EXPECT_EQ(entries_1.size(), 3UL);
EXPECT_EQ(entries_1[0].name, "app1");
EXPECT_EQ(entries_1[0].ios.user_id, 0UL);
EXPECT_EQ(entries_1[0].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
EXPECT_EQ(entries_1[1].name, "app3");
EXPECT_EQ(entries_1[1].ios.user_id, 0UL);
EXPECT_EQ(entries_1[1].ios.uid_ios.bytes[READ][BACKGROUND][CHARGER_OFF], 1000UL);
EXPECT_EQ(entries_1[2].name, "app1");
EXPECT_EQ(entries_1[2].ios.user_id, 1UL);
EXPECT_EQ(entries_1[2].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_OFF], 1000UL);
EXPECT_EQ(uidm.io_history[400].start_ts, 300UL);
const vector<struct uid_record>& entries_2 = uidm.io_history[400].entries;
EXPECT_EQ(entries_2.size(), 1UL);
EXPECT_EQ(entries_2[0].name, "app1");
EXPECT_EQ(entries_2[0].ios.user_id, 0UL);
EXPECT_EQ(entries_2[0].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
map<string, io_usage> merged_entries_0 = merge_io_usage(entries_0);
EXPECT_EQ(merged_entries_0.size(), 2UL);
EXPECT_EQ(merged_entries_0.count("app1"), 1UL);
EXPECT_EQ(merged_entries_0.count("app2"), 1UL);
EXPECT_EQ(merged_entries_0["app1"].bytes[READ][FOREGROUND][CHARGER_ON], 1000UL);
EXPECT_EQ(merged_entries_0["app1"].bytes[WRITE][FOREGROUND][CHARGER_ON], 2000UL);
EXPECT_EQ(merged_entries_0["app2"].bytes[READ][FOREGROUND][CHARGER_OFF], 1000UL);
map<string, io_usage> merged_entries_1 = merge_io_usage(entries_1);
EXPECT_EQ(merged_entries_1.size(), 2UL);
EXPECT_EQ(merged_entries_1.count("app1"), 1UL);
EXPECT_EQ(merged_entries_1.count("app3"), 1UL);
EXPECT_EQ(merged_entries_1["app1"].bytes[WRITE][FOREGROUND][CHARGER_OFF], 1000UL);
EXPECT_EQ(merged_entries_1["app1"].bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
EXPECT_EQ(merged_entries_1["app3"].bytes[READ][BACKGROUND][CHARGER_OFF], 1000UL);
map<string, io_usage> merged_entries_2 = merge_io_usage(entries_2);
EXPECT_EQ(merged_entries_2.size(), 1UL);
EXPECT_EQ(merged_entries_2.count("app1"), 1UL);
EXPECT_EQ(merged_entries_2["app1"].bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
}