Merge "metricsd: Make the unit tests pass."

Bertrand Simonnet 2015-09-02 21:26:40 +00:00 committed by Gerrit Code Review
commit 74f6f8c323
13 changed files with 281 additions and 425 deletions


@@ -41,6 +41,28 @@ metrics_daemon_sources := \
serialization/metric_sample.cc \
serialization/serialization_utils.cc
metrics_tests_sources := \
metrics_daemon.cc \
metrics_daemon_test.cc \
metrics_library_test.cc \
persistent_integer.cc \
persistent_integer_test.cc \
serialization/metric_sample.cc \
serialization/serialization_utils.cc \
serialization/serialization_utils_unittest.cc \
timer.cc \
timer_test.cc \
uploader/metrics_hashes.cc \
uploader/metrics_hashes_unittest.cc \
uploader/metrics_log_base.cc \
uploader/metrics_log_base_unittest.cc \
uploader/metrics_log.cc \
uploader/mock/sender_mock.cc \
uploader/sender_http.cc \
uploader/system_profile_cache.cc \
uploader/upload_service.cc \
uploader/upload_service_test.cc \
metrics_CFLAGS := -Wall \
-Wno-char-subscripts \
-Wno-missing-field-initializers \
@@ -125,4 +147,25 @@ LOCAL_SRC_FILES := init.$(LOCAL_INIT_SERVICE).rc
include $(BUILD_PREBUILT)
endif # INITRC_TEMPLATE
# Unit tests for metrics.
# ========================================================
include $(CLEAR_VARS)
LOCAL_MODULE := metrics_tests
LOCAL_CFLAGS := $(metrics_CFLAGS)
LOCAL_CPP_EXTENSION := $(metrics_cpp_extension)
LOCAL_CPPFLAGS := $(metrics_CPPFLAGS) -Wno-sign-compare
LOCAL_RTTI_FLAG := -frtti
LOCAL_SHARED_LIBRARIES := $(metrics_shared_libraries) \
libmetrics \
libprotobuf-cpp-lite \
libchromeos-http \
libchromeos-dbus \
libcutils \
libdbus \
LOCAL_SRC_FILES := $(metrics_tests_sources)
LOCAL_STATIC_LIBRARIES := libBionicGtestMain libgmock metrics_daemon_protos
include $(BUILD_NATIVE_TEST)
endif # HOST_OS == linux
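Note: the test module links libBionicGtestMain (see LOCAL_STATIC_LIBRARIES above), which provides the gtest entry point; this is why the per-file main() definitions are deleted from the individual test files later in this change. For illustration only, a gtest main library supplies the equivalent of this sketch:

#include <gtest/gtest.h>

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}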


@@ -129,6 +129,9 @@ class MetricsLibrary : public MetricsLibraryInterface {
FRIEND_TEST(MetricsLibraryTest, SendMessageToChrome);
FRIEND_TEST(MetricsLibraryTest, SendMessageToChromeUMAEventsBadFileLocation);
void InitForTest(const std::string& uma_events_file,
const std::string& consent_file);
// Sets |*result| to whether or not the |mounts_file| indicates that
// the |device_name| is currently mounted. Uses |buffer| of
// |buffer_size| to read the file. Returns false if any error.
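Note: InitForTest lets a test point the library at temporary files instead of the real events and consent paths. A minimal usage sketch, assuming a base::ScopedTempDir named temp_dir (it mirrors the metrics_library_test.cc changes below):

base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
MetricsLibrary lib;
lib.InitForTest(temp_dir.path().Append("events").value(),
                temp_dir.path().Append("consent").value());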


@@ -195,10 +195,10 @@ int MetricsDaemon::Run() {
}
void MetricsDaemon::RunUploaderTest() {
upload_service_.reset(new UploadService(new SystemProfileCache(true,
config_root_),
metrics_lib_,
server_));
upload_service_.reset(new UploadService(
new SystemProfileCache(true, base::FilePath(config_root_)),
metrics_lib_,
server_));
upload_service_->Init(upload_interval_, metrics_file_);
upload_service_->UploadEvent();
}


@@ -22,11 +22,12 @@
#include <base/at_exit.h>
#include <base/files/file_util.h>
#include <base/files/scoped_temp_dir.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/stringprintf.h>
#include <chromeos/dbus/service_constants.h>
#include <gtest/gtest.h>
#include "constants.h"
#include "metrics_daemon.h"
#include "metrics_library_mock.h"
#include "persistent_integer_mock.h"
@@ -45,7 +46,6 @@ using ::testing::Return;
using ::testing::StrictMock;
using chromeos_metrics::PersistentIntegerMock;
static const char kFakeDiskStatsName[] = "fake-disk-stats";
static const char kFakeDiskStatsFormat[] =
" 1793 1788 %" PRIu64 " 105580 "
" 196 175 %" PRIu64 " 30290 "
@@ -53,9 +53,6 @@ static const char kFakeDiskStatsFormat[] =
static const uint64_t kFakeReadSectors[] = {80000, 100000};
static const uint64_t kFakeWriteSectors[] = {3000, 4000};
static const char kFakeVmStatsName[] = "fake-vm-stats";
static const char kFakeScalingMaxFreqPath[] = "fake-scaling-max-freq";
static const char kFakeCpuinfoMaxFreqPath[] = "fake-cpuinfo-max-freq";
class MetricsDaemonTest : public testing::Test {
protected:
@@ -63,78 +60,35 @@ class MetricsDaemonTest : public testing::Test {
std::string kFakeDiskStats1;
virtual void SetUp() {
EXPECT_TRUE(temp_dir_.CreateUniqueTempDir());
scaling_max_freq_path_ = temp_dir_.path().Append("scaling_max");
cpu_max_freq_path_ = temp_dir_.path().Append("cpu_freq_max");
disk_stats_path_ = temp_dir_.path().Append("disk_stats");
kFakeDiskStats0 = base::StringPrintf(kFakeDiskStatsFormat,
kFakeReadSectors[0],
kFakeWriteSectors[0]);
kFakeDiskStats1 = base::StringPrintf(kFakeDiskStatsFormat,
kFakeReadSectors[1],
kFakeWriteSectors[1]);
CreateFakeDiskStatsFile(kFakeDiskStats0.c_str());
CreateUint64ValueFile(base::FilePath(kFakeCpuinfoMaxFreqPath), 10000000);
CreateUint64ValueFile(base::FilePath(kFakeScalingMaxFreqPath), 10000000);
chromeos_metrics::PersistentInteger::SetTestingMode(true);
CreateFakeDiskStatsFile(kFakeDiskStats0);
CreateUint64ValueFile(cpu_max_freq_path_, 10000000);
CreateUint64ValueFile(scaling_max_freq_path_, 10000000);
chromeos_metrics::PersistentInteger::SetMetricsDirectory(
temp_dir_.path().value());
daemon_.Init(true,
false,
true,
&metrics_lib_,
kFakeDiskStatsName,
kFakeVmStatsName,
kFakeScalingMaxFreqPath,
kFakeCpuinfoMaxFreqPath,
disk_stats_path_.value(),
scaling_max_freq_path_.value(),
cpu_max_freq_path_.value(),
base::TimeDelta::FromMinutes(30),
kMetricsServer,
kMetricsFilePath,
metrics::kMetricsServer,
metrics::kMetricsEventsFilePath,
"/");
// Replace original persistent values with mock ones.
daily_active_use_mock_ =
new StrictMock<PersistentIntegerMock>("1.mock");
daemon_.daily_active_use_.reset(daily_active_use_mock_);
kernel_crash_interval_mock_ =
new StrictMock<PersistentIntegerMock>("2.mock");
daemon_.kernel_crash_interval_.reset(kernel_crash_interval_mock_);
user_crash_interval_mock_ =
new StrictMock<PersistentIntegerMock>("3.mock");
daemon_.user_crash_interval_.reset(user_crash_interval_mock_);
unclean_shutdown_interval_mock_ =
new StrictMock<PersistentIntegerMock>("4.mock");
daemon_.unclean_shutdown_interval_.reset(unclean_shutdown_interval_mock_);
}
virtual void TearDown() {
EXPECT_EQ(0, unlink(kFakeDiskStatsName));
EXPECT_EQ(0, unlink(kFakeScalingMaxFreqPath));
EXPECT_EQ(0, unlink(kFakeCpuinfoMaxFreqPath));
}
// Adds expectations that the active use aggregation counters will be
// updated by the specified count.
void ExpectActiveUseUpdate(int count) {
EXPECT_CALL(*daily_active_use_mock_, Add(count))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*kernel_crash_interval_mock_, Add(count))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*user_crash_interval_mock_, Add(count))
.Times(1)
.RetiresOnSaturation();
}
// As above, but ignore values of counter updates.
void IgnoreActiveUseUpdate() {
EXPECT_CALL(*daily_active_use_mock_, Add(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*kernel_crash_interval_mock_, Add(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*user_crash_interval_mock_, Add(_))
.Times(1)
.RetiresOnSaturation();
}
// Adds a metrics library mock expectation that the specified metric
@@ -177,19 +131,15 @@ class MetricsDaemonTest : public testing::Test {
}
// Creates or overwrites an input file containing fake disk stats.
void CreateFakeDiskStatsFile(const char* fake_stats) {
if (unlink(kFakeDiskStatsName) < 0) {
EXPECT_EQ(errno, ENOENT);
}
FILE* f = fopen(kFakeDiskStatsName, "w");
EXPECT_EQ(1, fwrite(fake_stats, strlen(fake_stats), 1, f));
EXPECT_EQ(0, fclose(f));
void CreateFakeDiskStatsFile(const string& fake_stats) {
EXPECT_EQ(base::WriteFile(disk_stats_path_,
fake_stats.data(), fake_stats.size()),
fake_stats.size());
}
// Creates or overwrites the file in |path| so that it contains the printable
// representation of |value|.
void CreateUint64ValueFile(const base::FilePath& path, uint64_t value) {
base::DeleteFile(path, false);
std::string value_string = base::Uint64ToString(value);
ASSERT_EQ(value_string.length(),
base::WriteFile(path, value_string.c_str(),
@@ -199,29 +149,19 @@ class MetricsDaemonTest : public testing::Test {
// The MetricsDaemon under test.
MetricsDaemon daemon_;
// Temporary directory used for tests.
base::ScopedTempDir temp_dir_;
// Path for the fake files.
base::FilePath scaling_max_freq_path_;
base::FilePath cpu_max_freq_path_;
base::FilePath disk_stats_path_;
// Mocks. They are strict mocks so that all unexpected
// calls are marked as failures.
StrictMock<MetricsLibraryMock> metrics_lib_;
StrictMock<PersistentIntegerMock>* daily_active_use_mock_;
StrictMock<PersistentIntegerMock>* kernel_crash_interval_mock_;
StrictMock<PersistentIntegerMock>* user_crash_interval_mock_;
StrictMock<PersistentIntegerMock>* unclean_shutdown_interval_mock_;
};
TEST_F(MetricsDaemonTest, CheckSystemCrash) {
static const char kKernelCrashDetected[] = "test-kernel-crash-detected";
EXPECT_FALSE(daemon_.CheckSystemCrash(kKernelCrashDetected));
base::FilePath crash_detected(kKernelCrashDetected);
base::WriteFile(crash_detected, "", 0);
EXPECT_TRUE(base::PathExists(crash_detected));
EXPECT_TRUE(daemon_.CheckSystemCrash(kKernelCrashDetected));
EXPECT_FALSE(base::PathExists(crash_detected));
EXPECT_FALSE(daemon_.CheckSystemCrash(kKernelCrashDetected));
EXPECT_FALSE(base::PathExists(crash_detected));
base::DeleteFile(crash_detected, false);
}
TEST_F(MetricsDaemonTest, MessageFilter) {
// Ignore calls to SendToUMA.
EXPECT_CALL(metrics_lib_, SendToUMA(_, _, _, _, _)).Times(AnyNumber());
@@ -232,7 +172,6 @@ TEST_F(MetricsDaemonTest, MessageFilter) {
EXPECT_EQ(DBUS_HANDLER_RESULT_NOT_YET_HANDLED, res);
DeleteDBusMessage(msg);
IgnoreActiveUseUpdate();
vector<string> signal_args;
msg = NewDBusSignalString("/",
"org.chromium.CrashReporter",
@@ -260,25 +199,6 @@ TEST_F(MetricsDaemonTest, SendSample) {
/* min */ 1, /* max */ 100, /* buckets */ 50);
}
TEST_F(MetricsDaemonTest, ReportDiskStats) {
uint64_t read_sectors_now, write_sectors_now;
CreateFakeDiskStatsFile(kFakeDiskStats1.c_str());
daemon_.DiskStatsReadStats(&read_sectors_now, &write_sectors_now);
EXPECT_EQ(read_sectors_now, kFakeReadSectors[1]);
EXPECT_EQ(write_sectors_now, kFakeWriteSectors[1]);
MetricsDaemon::StatsState s_state = daemon_.stats_state_;
EXPECT_CALL(metrics_lib_,
SendToUMA(_, (kFakeReadSectors[1] - kFakeReadSectors[0]) / 30,
_, _, _));
EXPECT_CALL(metrics_lib_,
SendToUMA(_, (kFakeWriteSectors[1] - kFakeWriteSectors[0]) / 30,
_, _, _));
EXPECT_CALL(metrics_lib_, SendEnumToUMA(_, _, _)); // SendCpuThrottleMetrics
daemon_.StatsCallback();
EXPECT_TRUE(s_state != daemon_.stats_state_);
}
TEST_F(MetricsDaemonTest, ProcessMeminfo) {
string meminfo =
"MemTotal: 2000000 kB\nMemFree: 500000 kB\n"
@@ -337,24 +257,24 @@ TEST_F(MetricsDaemonTest, ReadFreqToInt) {
const int fake_max_freq = 2000000;
int scaled_freq = 0;
int max_freq = 0;
CreateUint64ValueFile(base::FilePath(kFakeScalingMaxFreqPath),
fake_scaled_freq);
CreateUint64ValueFile(base::FilePath(kFakeCpuinfoMaxFreqPath), fake_max_freq);
CreateUint64ValueFile(scaling_max_freq_path_, fake_scaled_freq);
CreateUint64ValueFile(cpu_max_freq_path_, fake_max_freq);
EXPECT_TRUE(daemon_.testing_);
EXPECT_TRUE(daemon_.ReadFreqToInt(kFakeScalingMaxFreqPath, &scaled_freq));
EXPECT_TRUE(daemon_.ReadFreqToInt(kFakeCpuinfoMaxFreqPath, &max_freq));
EXPECT_TRUE(daemon_.ReadFreqToInt(scaling_max_freq_path_.value(),
&scaled_freq));
EXPECT_TRUE(daemon_.ReadFreqToInt(cpu_max_freq_path_.value(), &max_freq));
EXPECT_EQ(fake_scaled_freq, scaled_freq);
EXPECT_EQ(fake_max_freq, max_freq);
}
TEST_F(MetricsDaemonTest, SendCpuThrottleMetrics) {
CreateUint64ValueFile(base::FilePath(kFakeCpuinfoMaxFreqPath), 2001000);
CreateUint64ValueFile(cpu_max_freq_path_, 2001000);
// Test the 101% and 100% cases.
CreateUint64ValueFile(base::FilePath(kFakeScalingMaxFreqPath), 2001000);
CreateUint64ValueFile(scaling_max_freq_path_, 2001000);
EXPECT_TRUE(daemon_.testing_);
EXPECT_CALL(metrics_lib_, SendEnumToUMA(_, 101, 101));
daemon_.SendCpuThrottleMetrics();
CreateUint64ValueFile(base::FilePath(kFakeScalingMaxFreqPath), 2000000);
CreateUint64ValueFile(scaling_max_freq_path_, 2000000);
EXPECT_CALL(metrics_lib_, SendEnumToUMA(_, 100, 101));
daemon_.SendCpuThrottleMetrics();
}
@@ -370,12 +290,14 @@ TEST_F(MetricsDaemonTest, SendZramMetrics) {
const uint64_t page_size = 4096;
const uint64_t zero_pages = 10 * 1000 * 1000 / page_size;
CreateUint64ValueFile(base::FilePath(MetricsDaemon::kComprDataSizeName),
compr_data_size);
CreateUint64ValueFile(base::FilePath(MetricsDaemon::kOrigDataSizeName),
orig_data_size);
CreateUint64ValueFile(base::FilePath(MetricsDaemon::kZeroPagesName),
zero_pages);
CreateUint64ValueFile(
temp_dir_.path().Append(MetricsDaemon::kComprDataSizeName),
compr_data_size);
CreateUint64ValueFile(
temp_dir_.path().Append(MetricsDaemon::kOrigDataSizeName),
orig_data_size);
CreateUint64ValueFile(
temp_dir_.path().Append(MetricsDaemon::kZeroPagesName), zero_pages);
const uint64_t real_orig_size = orig_data_size + zero_pages * page_size;
const uint64_t zero_ratio_percent =
@@ -390,11 +312,5 @@ TEST_F(MetricsDaemonTest, SendZramMetrics) {
EXPECT_CALL(metrics_lib_, SendToUMA(_, zero_pages, _, _, _));
EXPECT_CALL(metrics_lib_, SendToUMA(_, zero_ratio_percent, _, _, _));
EXPECT_TRUE(daemon_.ReportZram(base::FilePath(".")));
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
EXPECT_TRUE(daemon_.ReportZram(temp_dir_.path()));
}


@@ -140,6 +140,12 @@ void MetricsLibrary::Init() {
uma_events_file_ = metrics::kMetricsEventsFilePath;
}
void MetricsLibrary::InitForTest(const std::string& uma_events_file,
const std::string& consent_file) {
uma_events_file_ = uma_events_file;
consent_file_ = consent_file;
}
bool MetricsLibrary::SendToUMA(const std::string& name,
int sample,
int min,


@@ -14,130 +14,52 @@
* limitations under the License.
*/
#include <cstring>
#include <base/files/file_util.h>
#include <base/files/scoped_temp_dir.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <policy/mock_device_policy.h>
#include <policy/libpolicy.h>
#include "metrics/c_metrics_library.h"
#include "metrics/metrics_library.h"
using base::FilePath;
using ::testing::_;
using ::testing::Return;
using ::testing::AnyNumber;
static const FilePath kTestUMAEventsFile("test-uma-events");
static const char kTestMounts[] = "test-mounts";
ACTION_P(SetMetricsPolicy, enabled) {
*arg0 = enabled;
return true;
}
class MetricsLibraryTest : public testing::Test {
protected:
virtual void SetUp() {
EXPECT_TRUE(lib_.uma_events_file_.empty());
lib_.Init();
EXPECT_FALSE(lib_.uma_events_file_.empty());
lib_.uma_events_file_ = kTestUMAEventsFile.value();
EXPECT_EQ(0, WriteFile(kTestUMAEventsFile, "", 0));
device_policy_ = new policy::MockDevicePolicy();
EXPECT_CALL(*device_policy_, LoadPolicy())
.Times(AnyNumber())
.WillRepeatedly(Return(true));
EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
.Times(AnyNumber())
.WillRepeatedly(SetMetricsPolicy(true));
provider_ = new policy::PolicyProvider(device_policy_);
lib_.SetPolicyProvider(provider_);
ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
consent_file_ = temp_dir_.path().Append("consent");
uma_events_file_ = temp_dir_.path().Append("events");
lib_.InitForTest(uma_events_file_.value(), consent_file_.value());
EXPECT_EQ(0, WriteFile(uma_events_file_, "", 0));
// Defeat metrics enabled caching between tests.
lib_.cached_enabled_time_ = 0;
}
virtual void TearDown() {
base::DeleteFile(FilePath(kTestMounts), false);
base::DeleteFile(kTestUMAEventsFile, false);
void SetMetricsConsent(bool enabled) {
if (enabled) {
ASSERT_EQ(base::WriteFile(consent_file_, "", 0), 0);
} else {
ASSERT_TRUE(base::DeleteFile(consent_file_, false));
}
}
void VerifyEnabledCacheHit(bool to_value);
void VerifyEnabledCacheEviction(bool to_value);
MetricsLibrary lib_;
policy::MockDevicePolicy* device_policy_;
policy::PolicyProvider* provider_;
base::ScopedTempDir temp_dir_;
base::FilePath consent_file_;
base::FilePath uma_events_file_;
};
TEST_F(MetricsLibraryTest, IsDeviceMounted) {
static const char kTestContents[] =
"0123456789abcde 0123456789abcde\nguestfs foo bar\n";
char buffer[1024];
int block_sizes[] = { 1, 2, 3, 4, 5, 6, 8, 12, 14, 16, 32, 1024 };
bool result;
EXPECT_FALSE(lib_.IsDeviceMounted("guestfs",
"nonexistent",
buffer,
1,
&result));
ASSERT_TRUE(base::WriteFile(base::FilePath(kTestMounts),
kTestContents,
strlen(kTestContents)));
EXPECT_FALSE(lib_.IsDeviceMounted("guestfs",
kTestMounts,
buffer,
0,
&result));
for (size_t i = 0; i < arraysize(block_sizes); ++i) {
EXPECT_TRUE(lib_.IsDeviceMounted("0123456789abcde",
kTestMounts,
buffer,
block_sizes[i],
&result));
EXPECT_TRUE(result);
EXPECT_TRUE(lib_.IsDeviceMounted("guestfs",
kTestMounts,
buffer,
block_sizes[i],
&result));
EXPECT_TRUE(result);
EXPECT_TRUE(lib_.IsDeviceMounted("0123456",
kTestMounts,
buffer,
block_sizes[i],
&result));
EXPECT_FALSE(result);
EXPECT_TRUE(lib_.IsDeviceMounted("9abcde",
kTestMounts,
buffer,
block_sizes[i],
&result));
EXPECT_FALSE(result);
EXPECT_TRUE(lib_.IsDeviceMounted("foo",
kTestMounts,
buffer,
block_sizes[i],
&result));
EXPECT_FALSE(result);
EXPECT_TRUE(lib_.IsDeviceMounted("bar",
kTestMounts,
buffer,
block_sizes[i],
&result));
EXPECT_FALSE(result);
}
}
TEST_F(MetricsLibraryTest, AreMetricsEnabledFalse) {
EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
.WillOnce(SetMetricsPolicy(false));
SetMetricsConsent(false);
EXPECT_FALSE(lib_.AreMetricsEnabled());
}
TEST_F(MetricsLibraryTest, AreMetricsEnabledTrue) {
SetMetricsConsent(true);
EXPECT_TRUE(lib_.AreMetricsEnabled());
}
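Note: consent is now expressed purely by the presence of the consent file, so a test toggles metrics on or off by creating or deleting that file; no device-policy mock is involved. A hypothetical test built on the SetMetricsConsent helper above (the cache must be defeated between checks):

TEST_F(MetricsLibraryTest, ConsentToggle) {
  SetMetricsConsent(true);
  lib_.cached_enabled_time_ = 0;  // Defeat the enabled-state cache.
  EXPECT_TRUE(lib_.AreMetricsEnabled());
  SetMetricsConsent(false);
  lib_.cached_enabled_time_ = 0;
  EXPECT_FALSE(lib_.AreMetricsEnabled());
}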
@@ -146,12 +68,12 @@ void MetricsLibraryTest::VerifyEnabledCacheHit(bool to_value) {
// times in a row.
for (int i = 0; i < 100; ++i) {
lib_.cached_enabled_time_ = 0;
EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
.WillOnce(SetMetricsPolicy(!to_value));
ASSERT_EQ(!to_value, lib_.AreMetricsEnabled());
ON_CALL(*device_policy_, GetMetricsEnabled(_))
.WillByDefault(SetMetricsPolicy(to_value));
if (lib_.AreMetricsEnabled() == !to_value)
SetMetricsConsent(to_value);
lib_.AreMetricsEnabled();
// If we check the metrics status twice in a row, we use the cached value
// the second time.
SetMetricsConsent(!to_value);
if (lib_.AreMetricsEnabled() == to_value)
return;
}
ADD_FAILURE() << "Did not see evidence of caching";
@@ -159,14 +81,12 @@ void MetricsLibraryTest::VerifyEnabledCacheHit(bool to_value) {
void MetricsLibraryTest::VerifyEnabledCacheEviction(bool to_value) {
lib_.cached_enabled_time_ = 0;
EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
.WillOnce(SetMetricsPolicy(!to_value));
SetMetricsConsent(!to_value);
ASSERT_EQ(!to_value, lib_.AreMetricsEnabled());
EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
.WillOnce(SetMetricsPolicy(to_value));
ASSERT_LT(abs(static_cast<int>(time(nullptr) - lib_.cached_enabled_time_)),
5);
// Sleep one second (or cheat to be faster).
SetMetricsConsent(to_value);
// Sleep one second (or cheat to be faster) and check that we are not using
// the cached value.
--lib_.cached_enabled_time_;
ASSERT_EQ(to_value, lib_.AreMetricsEnabled());
}
@@ -177,50 +97,3 @@ TEST_F(MetricsLibraryTest, AreMetricsEnabledCaching) {
VerifyEnabledCacheEviction(false);
VerifyEnabledCacheEviction(true);
}
class CMetricsLibraryTest : public testing::Test {
protected:
virtual void SetUp() {
lib_ = CMetricsLibraryNew();
MetricsLibrary& ml = *reinterpret_cast<MetricsLibrary*>(lib_);
EXPECT_TRUE(ml.uma_events_file_.empty());
CMetricsLibraryInit(lib_);
EXPECT_FALSE(ml.uma_events_file_.empty());
ml.uma_events_file_ = kTestUMAEventsFile.value();
EXPECT_EQ(0, WriteFile(kTestUMAEventsFile, "", 0));
device_policy_ = new policy::MockDevicePolicy();
EXPECT_CALL(*device_policy_, LoadPolicy())
.Times(AnyNumber())
.WillRepeatedly(Return(true));
EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
.Times(AnyNumber())
.WillRepeatedly(SetMetricsPolicy(true));
provider_ = new policy::PolicyProvider(device_policy_);
ml.SetPolicyProvider(provider_);
reinterpret_cast<MetricsLibrary*>(lib_)->cached_enabled_time_ = 0;
}
virtual void TearDown() {
CMetricsLibraryDelete(lib_);
base::DeleteFile(kTestUMAEventsFile, false);
}
CMetricsLibrary lib_;
policy::MockDevicePolicy* device_policy_;
policy::PolicyProvider* provider_;
};
TEST_F(CMetricsLibraryTest, AreMetricsEnabledFalse) {
EXPECT_CALL(*device_policy_, GetMetricsEnabled(_))
.WillOnce(SetMetricsPolicy(false));
EXPECT_FALSE(CMetricsLibraryAreMetricsEnabled(lib_));
}
TEST_F(CMetricsLibraryTest, AreMetricsEnabledTrue) {
EXPECT_TRUE(CMetricsLibraryAreMetricsEnabled(lib_));
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}


@@ -28,18 +28,14 @@
namespace chromeos_metrics {
// Static class member instantiation.
bool PersistentInteger::testing_ = false;
std::string PersistentInteger::metrics_directory_ = metrics::kMetricsDirectory;
PersistentInteger::PersistentInteger(const std::string& name) :
value_(0),
version_(kVersion),
name_(name),
synced_(false) {
if (testing_) {
backing_file_name_ = name_;
} else {
backing_file_name_ = metrics::kMetricsDirectory + name_;
}
backing_file_name_ = metrics_directory_ + name_;
}
PersistentInteger::~PersistentInteger() {}
@@ -100,8 +96,8 @@ bool PersistentInteger::Read() {
return read_succeeded;
}
void PersistentInteger::SetTestingMode(bool testing) {
testing_ = testing;
void PersistentInteger::SetMetricsDirectory(const std::string& directory) {
metrics_directory_ = directory;
}
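Note: counters created after SetMetricsDirectory is called put their backing files under the configured directory, so a test can sandbox every PersistentInteger in a scoped temp dir. A minimal sketch (the counter name is hypothetical):

base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
chromeos_metrics::PersistentInteger::SetMetricsDirectory(
    temp_dir.path().value());
chromeos_metrics::PersistentInteger counter("Platform.DailyUseTime");
counter.Add(42);  // The backing file now lives under temp_dir.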


@@ -50,10 +50,9 @@ class PersistentInteger {
// Virtual only because of mock.
virtual void Add(int64_t x);
// After calling with |testing| = true, changes some behavior for the purpose
// of testing. For instance: instances created while testing use the current
// directory for the backing files.
static void SetTestingMode(bool testing);
// Sets the directory path for all persistent integers.
// This is used in unittests to change where the counters are stored.
static void SetMetricsDirectory(const std::string& directory);
private:
static const int kVersion = 1001;
@@ -70,8 +69,8 @@ class PersistentInteger {
int32_t version_;
std::string name_;
std::string backing_file_name_;
static std::string metrics_directory_;
bool synced_;
static bool testing_;
};
} // namespace chromeos_metrics


@@ -19,6 +19,7 @@
#include <base/compiler_specific.h>
#include <base/files/file_enumerator.h>
#include <base/files/file_util.h>
#include <base/files/scoped_temp_dir.h>
#include "persistent_integer.h"
@@ -30,7 +31,9 @@ using chromeos_metrics::PersistentInteger;
class PersistentIntegerTest : public testing::Test {
void SetUp() override {
// Set testing mode.
chromeos_metrics::PersistentInteger::SetTestingMode(true);
ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
chromeos_metrics::PersistentInteger::SetMetricsDirectory(
temp_dir_.path().value());
}
void TearDown() override {
@@ -45,6 +48,8 @@ class PersistentIntegerTest : public testing::Test {
base::DeleteFile(name, false);
}
}
base::ScopedTempDir temp_dir_;
};
TEST_F(PersistentIntegerTest, BasicChecks) {
@@ -71,8 +76,3 @@ TEST_F(PersistentIntegerTest, BasicChecks) {
pi.reset(new PersistentInteger(kBackingFileName));
EXPECT_EQ(0, pi->Get());
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}


@@ -26,7 +26,9 @@ class ChromeUserMetricsExtension;
// Mock profile setter used for testing.
class MockSystemProfileSetter : public SystemProfileSetter {
public:
void Populate(metrics::ChromeUserMetricsExtension* profile_proto) override {}
bool Populate(metrics::ChromeUserMetricsExtension* profile_proto) override {
return true;
}
};
#endif // METRICS_UPLOADER_MOCK_MOCK_SYSTEM_PROFILE_SETTER_H_


@@ -61,7 +61,7 @@ SystemProfileCache::SystemProfileCache()
}
SystemProfileCache::SystemProfileCache(bool testing,
const std::string& config_root)
const base::FilePath& config_root)
: initialized_(false),
testing_(testing),
config_root_(config_root),
@@ -73,9 +73,7 @@ bool SystemProfileCache::Initialize() {
CHECK(!initialized_)
<< "this should be called only once in the metrics_daemon lifetime.";
char property_value[PROPERTY_VALUE_MAX];
property_get(metrics::kBuildTargetIdProperty, property_value, "");
profile_.build_target_id = std::string(property_value);
profile_.build_target_id = GetProperty(metrics::kBuildTargetIdProperty);
if (profile_.build_target_id.empty()) {
LOG(ERROR) << "System property " << metrics::kBuildTargetIdProperty
@@ -83,11 +81,8 @@
return false;
}
property_get(metrics::kChannelProperty, property_value, "");
std::string channel(property_value);
property_get(metrics::kProductVersionProperty, property_value, "");
profile_.version = std::string(property_value);
std::string channel = GetProperty(metrics::kChannelProperty);
profile_.version = GetProperty(metrics::kProductVersionProperty);
if (channel.empty() || profile_.version.empty()) {
// If the channel or version is missing, the image is not official.
@@ -157,6 +152,18 @@ std::string SystemProfileCache::GetPersistentGUID(
return guid;
}
std::string SystemProfileCache::GetProperty(const std::string& name) {
if (testing_) {
std::string content;
base::ReadFileToString(config_root_.Append(name), &content);
return content;
} else {
char value[PROPERTY_VALUE_MAX];
property_get(name.data(), value, "");
return std::string(value);
}
}
metrics::SystemProfileProto_Channel SystemProfileCache::ProtoChannelFromString(
const std::string& channel) {
if (channel == "stable") {
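Note: GetProperty is what makes SystemProfileCache testable without real Android system properties: with |testing_| set, each property is read from a file named after it under |config_root_|. A sketch of faking a property in a test, mirroring the SetTestingProperty helper in upload_service_test.cc below:

base::ScopedTempDir dir;
ASSERT_TRUE(dir.CreateUniqueTempDir());
std::string value = "beta";
ASSERT_EQ(static_cast<int>(value.size()),
          base::WriteFile(dir.path().Append(metrics::kChannelProperty),
                          value.data(), value.size()));
SystemProfileCache cache(true /* testing */, dir.path());
// cache.Initialize() will now read "beta" as the channel.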


@@ -22,6 +22,7 @@
#include <string>
#include "base/compiler_specific.h"
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/memory/scoped_ptr.h"
#include "persistent_integer.h"
@@ -49,7 +50,7 @@ class SystemProfileCache : public SystemProfileSetter {
public:
SystemProfileCache();
SystemProfileCache(bool testing, const std::string& config_root);
SystemProfileCache(bool testing, const base::FilePath& config_root);
// Populates the ProfileSystem protobuf with system information.
bool Populate(metrics::ChromeUserMetricsExtension* metrics_proto) override;
@@ -75,9 +76,14 @@ class SystemProfileCache : public SystemProfileSetter {
// Initializes |profile_| only if it has not been yet initialized.
bool InitializeOrCheck();
// Gets a system property as a string.
// When |testing_| is true, reads the value from |config_root_|/|name|
// instead.
std::string GetProperty(const std::string& name);
bool initialized_;
bool testing_;
std::string config_root_;
base::FilePath config_root_;
scoped_ptr<chromeos_metrics::PersistentInteger> session_id_;
SystemProfile profile_;
};


@@ -16,11 +16,13 @@
#include <gtest/gtest.h>
#include "base/at_exit.h"
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
#include "base/logging.h"
#include "base/sys_info.h"
#include <base/at_exit.h>
#include <base/files/file_util.h>
#include <base/files/scoped_temp_dir.h>
#include <base/logging.h>
#include <base/sys_info.h>
#include "constants.h"
#include "metrics_library_mock.h"
#include "serialization/metric_sample.h"
#include "uploader/metrics_log.h"
@@ -34,35 +36,35 @@
class UploadServiceTest : public testing::Test {
protected:
UploadServiceTest()
: cache_(true, "/"),
upload_service_(new MockSystemProfileSetter(), &metrics_lib_,
kMetricsServer, true),
exit_manager_(new base::AtExitManager()) {
sender_ = new SenderMock;
upload_service_.sender_.reset(sender_);
upload_service_.Init(base::TimeDelta::FromMinutes(30), kMetricsFilePath);
}
virtual void SetUp() {
CHECK(dir_.CreateUniqueTempDir());
upload_service_.GatherHistograms();
upload_service_.Reset();
sender_->Reset();
upload_service_.reset(new UploadService(new MockSystemProfileSetter(),
&metrics_lib_, "", true));
chromeos_metrics::PersistentInteger::SetTestingMode(true);
cache_.session_id_.reset(new chromeos_metrics::PersistentInteger(
dir_.path().Append("session_id").value()));
upload_service_->sender_.reset(new SenderMock);
event_file_ = dir_.path().Append("event");
upload_service_->Init(base::TimeDelta::FromMinutes(30), event_file_.value());
upload_service_->GatherHistograms();
upload_service_->Reset();
chromeos_metrics::PersistentInteger::SetMetricsDirectory(
dir_.path().value());
}
scoped_ptr<metrics::MetricSample> Crash(const std::string& name) {
return metrics::MetricSample::CrashSample(name);
}
void SetTestingProperty(const std::string& name, const std::string& value) {
ASSERT_EQ(
value.size(),
base::WriteFile(dir_.path().Append(name), value.data(), value.size()));
}
base::FilePath event_file_;
base::ScopedTempDir dir_;
SenderMock* sender_;
SystemProfileCache cache_;
UploadService upload_service_;
scoped_ptr<UploadService> upload_service_;
MetricsLibraryMock metrics_lib_;
scoped_ptr<base::AtExitManager> exit_manager_;
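Note: the tests below share one ownership pattern worth calling out: a test that needs to inspect the sender installs its own SenderMock, handing ownership to the service while keeping a raw pointer for assertions. In sketch form:

SenderMock* sender = new SenderMock();
upload_service_->sender_.reset(sender);  // UploadService now owns the mock.
upload_service_->AddSample(*Crash("user"));
upload_service_->UploadEvent();
EXPECT_EQ(1, sender->send_call_count());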
@@ -70,18 +72,18 @@ class UploadServiceTest : public testing::Test {
// Tests that the right crash increments the right value.
TEST_F(UploadServiceTest, LogUserCrash) {
upload_service_.AddSample(*Crash("user").get());
upload_service_->AddSample(*Crash("user").get());
MetricsLog* log = upload_service_.current_log_.get();
MetricsLog* log = upload_service_->current_log_.get();
metrics::ChromeUserMetricsExtension* proto = log->uma_proto();
EXPECT_EQ(1, proto->system_profile().stability().other_user_crash_count());
}
TEST_F(UploadServiceTest, LogUncleanShutdown) {
upload_service_.AddSample(*Crash("uncleanshutdown"));
upload_service_->AddSample(*Crash("uncleanshutdown"));
EXPECT_EQ(1, upload_service_.current_log_
EXPECT_EQ(1, upload_service_->current_log_
->uma_proto()
->system_profile()
.stability()
@@ -89,9 +91,9 @@ TEST_F(UploadServiceTest, LogUncleanShutdown) {
}
TEST_F(UploadServiceTest, LogKernelCrash) {
upload_service_.AddSample(*Crash("kernel"));
upload_service_->AddSample(*Crash("kernel"));
EXPECT_EQ(1, upload_service_.current_log_
EXPECT_EQ(1, upload_service_->current_log_
->uma_proto()
->system_profile()
.stability()
@@ -99,47 +101,56 @@ TEST_F(UploadServiceTest, LogKernelCrash) {
}
TEST_F(UploadServiceTest, UnknownCrashIgnored) {
upload_service_.AddSample(*Crash("foo"));
upload_service_->AddSample(*Crash("foo"));
// The log should be empty.
EXPECT_FALSE(upload_service_.current_log_);
EXPECT_FALSE(upload_service_->current_log_);
}
TEST_F(UploadServiceTest, FailedSendAreRetried) {
sender_->set_should_succeed(false);
SenderMock* sender = new SenderMock();
upload_service_->sender_.reset(sender);
upload_service_.AddSample(*Crash("user"));
upload_service_.UploadEvent();
EXPECT_EQ(1, sender_->send_call_count());
std::string sent_string = sender_->last_message();
sender->set_should_succeed(false);
upload_service_.UploadEvent();
EXPECT_EQ(2, sender_->send_call_count());
EXPECT_EQ(sent_string, sender_->last_message());
upload_service_->AddSample(*Crash("user"));
upload_service_->UploadEvent();
EXPECT_EQ(1, sender->send_call_count());
std::string sent_string = sender->last_message();
upload_service_->UploadEvent();
EXPECT_EQ(2, sender->send_call_count());
EXPECT_EQ(sent_string, sender->last_message());
}
TEST_F(UploadServiceTest, DiscardLogsAfterTooManyFailedUpload) {
sender_->set_should_succeed(false);
upload_service_.AddSample(*Crash("user"));
SenderMock* sender = new SenderMock();
upload_service_->sender_.reset(sender);
sender->set_should_succeed(false);
upload_service_->AddSample(*Crash("user"));
for (int i = 0; i < UploadService::kMaxFailedUpload; i++) {
upload_service_.UploadEvent();
upload_service_->UploadEvent();
}
EXPECT_TRUE(upload_service_.staged_log_);
upload_service_.UploadEvent();
EXPECT_FALSE(upload_service_.staged_log_);
EXPECT_TRUE(upload_service_->staged_log_);
upload_service_->UploadEvent();
EXPECT_FALSE(upload_service_->staged_log_);
}
TEST_F(UploadServiceTest, EmptyLogsAreNotSent) {
upload_service_.UploadEvent();
EXPECT_FALSE(upload_service_.current_log_);
EXPECT_EQ(0, sender_->send_call_count());
SenderMock* sender = new SenderMock();
upload_service_->sender_.reset(sender);
upload_service_->UploadEvent();
EXPECT_FALSE(upload_service_->current_log_);
EXPECT_EQ(0, sender->send_call_count());
}
TEST_F(UploadServiceTest, LogEmptyByDefault) {
UploadService upload_service(new MockSystemProfileSetter(), &metrics_lib_,
kMetricsServer);
"");
// current_log_ should be initialized later, as it needs an AtExitManager to
// exist in order to gather system information from SysInfo.
@@ -147,39 +158,42 @@ TEST_F(UploadServiceTest, LogEmptyByDefault) {
}
TEST_F(UploadServiceTest, CanSendMultipleTimes) {
upload_service_.AddSample(*Crash("user"));
upload_service_.UploadEvent();
SenderMock* sender = new SenderMock();
upload_service_->sender_.reset(sender);
std::string first_message = sender_->last_message();
upload_service_->AddSample(*Crash("user"));
upload_service_->UploadEvent();
upload_service_.AddSample(*Crash("kernel"));
upload_service_.UploadEvent();
std::string first_message = sender->last_message();
EXPECT_NE(first_message, sender_->last_message());
upload_service_->AddSample(*Crash("kernel"));
upload_service_->UploadEvent();
EXPECT_NE(first_message, sender->last_message());
}
TEST_F(UploadServiceTest, LogEmptyAfterUpload) {
upload_service_.AddSample(*Crash("user"));
upload_service_->AddSample(*Crash("user"));
EXPECT_TRUE(upload_service_.current_log_);
EXPECT_TRUE(upload_service_->current_log_);
upload_service_.UploadEvent();
EXPECT_FALSE(upload_service_.current_log_);
upload_service_->UploadEvent();
EXPECT_FALSE(upload_service_->current_log_);
}
TEST_F(UploadServiceTest, LogContainsAggregatedValues) {
scoped_ptr<metrics::MetricSample> histogram =
metrics::MetricSample::HistogramSample("foo", 10, 0, 42, 10);
upload_service_.AddSample(*histogram.get());
upload_service_->AddSample(*histogram.get());
scoped_ptr<metrics::MetricSample> histogram2 =
metrics::MetricSample::HistogramSample("foo", 11, 0, 42, 10);
upload_service_.AddSample(*histogram2.get());
upload_service_->AddSample(*histogram2.get());
upload_service_.GatherHistograms();
upload_service_->GatherHistograms();
metrics::ChromeUserMetricsExtension* proto =
upload_service_.current_log_->uma_proto();
upload_service_->current_log_->uma_proto();
EXPECT_EQ(1, proto->histogram_event().size());
}
@@ -190,46 +204,41 @@ TEST_F(UploadServiceTest, ExtractChannelFromString) {
metrics::SystemProfileProto::CHANNEL_UNKNOWN);
EXPECT_EQ(metrics::SystemProfileProto::CHANNEL_DEV,
SystemProfileCache::ProtoChannelFromString("dev-channel"));
SystemProfileCache::ProtoChannelFromString("dev"));
EXPECT_EQ(metrics::SystemProfileProto::CHANNEL_STABLE,
SystemProfileCache::ProtoChannelFromString("stable"));
EXPECT_EQ(metrics::SystemProfileProto::CHANNEL_UNKNOWN,
SystemProfileCache::ProtoChannelFromString("dev-channel test"));
SystemProfileCache::ProtoChannelFromString("this is a test"));
}
TEST_F(UploadServiceTest, ValuesInConfigFileAreSent) {
std::string name("os name");
std::string content(
"CHROMEOS_RELEASE_NAME=" + name +
"\nCHROMEOS_RELEASE_VERSION=version\n"
"CHROMEOS_RELEASE_DESCRIPTION=description beta-channel test\n"
"CHROMEOS_RELEASE_TRACK=beta-channel\n"
"CHROMEOS_RELEASE_BUILD_TYPE=developer build\n"
"CHROMEOS_RELEASE_BOARD=myboard");
SenderMock* sender = new SenderMock();
upload_service_->sender_.reset(sender);
SetTestingProperty(metrics::kChannelProperty, "beta");
SetTestingProperty(metrics::kBuildTargetIdProperty, "hello");
SetTestingProperty(metrics::kProductVersionProperty, "1.2.3.4");
base::SysInfo::SetChromeOSVersionInfoForTest(content, base::Time());
scoped_ptr<metrics::MetricSample> histogram =
metrics::MetricSample::SparseHistogramSample("myhistogram", 1);
SystemProfileCache* local_cache_ = new SystemProfileCache(true, "/");
local_cache_->session_id_.reset(new chromeos_metrics::PersistentInteger(
dir_.path().Append("session_id").value()));
upload_service_.system_profile_setter_.reset(local_cache_);
// Reset to create the new log with the profile setter.
upload_service_.Reset();
upload_service_.AddSample(*histogram.get());
upload_service_.UploadEvent();
upload_service_->system_profile_setter_.reset(
new SystemProfileCache(true, dir_.path()));
upload_service_->Reset();
upload_service_->AddSample(*histogram.get());
upload_service_->UploadEvent();
EXPECT_EQ(1, sender_->send_call_count());
EXPECT_TRUE(sender_->is_good_proto());
EXPECT_EQ(1, sender_->last_message_proto().histogram_event().size());
EXPECT_EQ(1, sender->send_call_count());
EXPECT_TRUE(sender->is_good_proto());
EXPECT_EQ(1, sender->last_message_proto().histogram_event().size());
EXPECT_EQ(name, sender_->last_message_proto().system_profile().os().name());
EXPECT_EQ(metrics::SystemProfileProto::CHANNEL_BETA,
sender_->last_message_proto().system_profile().channel());
EXPECT_NE(0, sender_->last_message_proto().client_id());
EXPECT_NE(0,
sender_->last_message_proto().system_profile().build_timestamp());
EXPECT_NE(0, sender_->last_message_proto().session_id());
sender->last_message_proto().system_profile().channel());
EXPECT_NE(0, sender->last_message_proto().client_id());
EXPECT_NE(0, sender->last_message_proto().system_profile().build_timestamp());
EXPECT_NE(0, sender->last_message_proto().session_id());
}
TEST_F(UploadServiceTest, PersistentGUID) {
@@ -252,15 +261,11 @@ TEST_F(UploadServiceTest, PersistentGUID) {
}
TEST_F(UploadServiceTest, SessionIdIncrementedAtInitialization) {
cache_.Initialize();
int session_id = cache_.profile_.session_id;
cache_.initialized_ = false;
cache_.Initialize();
EXPECT_EQ(cache_.profile_.session_id, session_id + 1);
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
SetTestingProperty(metrics::kBuildTargetIdProperty, "hello");
SystemProfileCache cache(true, dir_.path());
cache.Initialize();
int session_id = cache.profile_.session_id;
cache.initialized_ = false;
cache.Initialize();
EXPECT_EQ(cache.profile_.session_id, session_id + 1);
}