/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <time.h>

#include <errno.h>
#include <gtest/gtest.h>
#include <limits.h>
#include <locale.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <atomic>
#include <memory>

#include "ScopedSignalHandler.h"
#include "utils.h"

#include "private/bionic_constants.h"

TEST(time, time) {
  // Acquire time
  time_t p1, t1 = time(&p1);
  // valid?
  ASSERT_NE(static_cast<time_t>(0), t1);
  ASSERT_NE(static_cast<time_t>(-1), t1);
  ASSERT_EQ(p1, t1);

  // Acquire time one+ second later
  usleep(1010000);
  time_t p2, t2 = time(&p2);
  // valid?
  ASSERT_NE(static_cast<time_t>(0), t2);
  ASSERT_NE(static_cast<time_t>(-1), t2);
  ASSERT_EQ(p2, t2);

  // Expect time progression
  ASSERT_LT(p1, p2);
  ASSERT_LE(t2 - t1, static_cast<time_t>(2));

  // Expect nullptr call to produce same results
  ASSERT_LE(t2, time(nullptr));
  ASSERT_LE(time(nullptr) - t2, static_cast<time_t>(1));
}

TEST(time, gmtime) {
  time_t t = 0;
  tm* broken_down = gmtime(&t);
  ASSERT_TRUE(broken_down != NULL);
  ASSERT_EQ(0, broken_down->tm_sec);
  ASSERT_EQ(0, broken_down->tm_min);
  ASSERT_EQ(0, broken_down->tm_hour);
  ASSERT_EQ(1, broken_down->tm_mday);
  ASSERT_EQ(0, broken_down->tm_mon);
  ASSERT_EQ(1970, broken_down->tm_year + 1900);
}

TEST(time, gmtime_r) {
  struct tm tm = {};
  time_t t = 0;
  struct tm* broken_down = gmtime_r(&t, &tm);
  ASSERT_EQ(broken_down, &tm);
  ASSERT_EQ(0, broken_down->tm_sec);
  ASSERT_EQ(0, broken_down->tm_min);
  ASSERT_EQ(0, broken_down->tm_hour);
  ASSERT_EQ(1, broken_down->tm_mday);
  ASSERT_EQ(0, broken_down->tm_mon);
  ASSERT_EQ(1970, broken_down->tm_year + 1900);
}

static void* gmtime_no_stack_overflow_14313703_fn(void*) {
  const char* original_tz = getenv("TZ");
  // Ensure we'll actually have to enter tzload by using a time zone that doesn't exist.
  setenv("TZ", "gmtime_stack_overflow_14313703", 1);
  tzset();
  if (original_tz != NULL) {
    setenv("TZ", original_tz, 1);
  }
  tzset();
  return NULL;
}

TEST(time, gmtime_no_stack_overflow_14313703) {
  // Is it safe to call tzload on a thread with a small stack?
  // http://b/14313703
  // https://code.google.com/p/android/issues/detail?id=61130
  pthread_attr_t a;
  ASSERT_EQ(0, pthread_attr_init(&a));
  ASSERT_EQ(0, pthread_attr_setstacksize(&a, PTHREAD_STACK_MIN));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &a, gmtime_no_stack_overflow_14313703_fn, NULL));
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(time, mktime_empty_TZ) {
  // tzcode used to have a bug where it didn't reinitialize some internal state.

  // Choose a time where DST is set.
  struct tm t;
  memset(&t, 0, sizeof(tm));
  t.tm_year = 1980 - 1900;
  t.tm_mon = 6;
  t.tm_mday = 2;

  setenv("TZ", "America/Los_Angeles", 1);
  tzset();
  ASSERT_EQ(static_cast<time_t>(331372800U), mktime(&t));

  memset(&t, 0, sizeof(tm));
  t.tm_year = 1980 - 1900;
  t.tm_mon = 6;
  t.tm_mday = 2;

  setenv("TZ", "", 1); // Implies UTC.
  tzset();
  ASSERT_EQ(static_cast<time_t>(331344000U), mktime(&t));
}

TEST(time, mktime_10310929) {
  struct tm t;
  memset(&t, 0, sizeof(tm));
  t.tm_year = 200;
  t.tm_mon = 2;
  t.tm_mday = 10;

#if !defined(__LP64__)
  // 32-bit bionic stupidly had a signed 32-bit time_t.
  ASSERT_EQ(-1, mktime(&t));
  ASSERT_EQ(EOVERFLOW, errno);
#else
  // Everyone else should be using a signed 64-bit time_t.
  ASSERT_GE(sizeof(time_t) * 8, 64U);

  setenv("TZ", "America/Los_Angeles", 1);
  tzset();
  errno = 0;
  ASSERT_EQ(static_cast<time_t>(4108348800U), mktime(&t));
  ASSERT_EQ(0, errno);

  setenv("TZ", "UTC", 1);
  tzset();
  errno = 0;
  ASSERT_EQ(static_cast<time_t>(4108320000U), mktime(&t));
  ASSERT_EQ(0, errno);
#endif
}

TEST(time, mktime_EOVERFLOW) {
  struct tm t;
  memset(&t, 0, sizeof(tm));

  // LP32 year range is 1901-2038, so this year is guaranteed not to overflow.
  t.tm_year = 2016 - 1900;
  t.tm_mon = 2;
  t.tm_mday = 10;

  errno = 0;
  ASSERT_NE(static_cast<time_t>(-1), mktime(&t));
  ASSERT_EQ(0, errno);

  // This will overflow for LP32 or LP64.
  t.tm_year = INT_MAX;

  errno = 0;
  ASSERT_EQ(static_cast<time_t>(-1), mktime(&t));
  ASSERT_EQ(EOVERFLOW, errno);
}

TEST(time, strftime) {
  setenv("TZ", "UTC", 1);

  struct tm t;
  memset(&t, 0, sizeof(tm));
  t.tm_year = 200;
  t.tm_mon = 2;
  t.tm_mday = 10;

  char buf[64];

  // Seconds since the epoch.
#if defined(__BIONIC__) || defined(__LP64__) // Not 32-bit glibc.
  EXPECT_EQ(10U, strftime(buf, sizeof(buf), "%s", &t));
  EXPECT_STREQ("4108320000", buf);
#endif

  // Date and time as text.
  EXPECT_EQ(24U, strftime(buf, sizeof(buf), "%c", &t));
  EXPECT_STREQ("Sun Mar 10 00:00:00 2100", buf);
}

TEST(time, strftime_null_tm_zone) {
  // Netflix on Nexus Player wouldn't start (http://b/25170306).
  struct tm t;
  memset(&t, 0, sizeof(tm));

  char buf[64];

  setenv("TZ", "America/Los_Angeles", 1);
  tzset();

  t.tm_isdst = 0; // "0 if Daylight Savings Time is not in effect".
  EXPECT_EQ(5U, strftime(buf, sizeof(buf), "<%Z>", &t));
  EXPECT_STREQ("<PST>", buf);

#if defined(__BIONIC__) // glibc 2.19 only copes with tm_isdst being 0 and 1.
  t.tm_isdst = 2; // "positive if Daylight Savings Time is in effect"
  EXPECT_EQ(5U, strftime(buf, sizeof(buf), "<%Z>", &t));
  EXPECT_STREQ("<PDT>", buf);

  t.tm_isdst = -123; // "and negative if the information is not available".
  EXPECT_EQ(2U, strftime(buf, sizeof(buf), "<%Z>", &t));
  EXPECT_STREQ("<>", buf);
#endif

  setenv("TZ", "UTC", 1);
  tzset();

  t.tm_isdst = 0;
  EXPECT_EQ(5U, strftime(buf, sizeof(buf), "<%Z>", &t));
  EXPECT_STREQ("<UTC>", buf);

#if defined(__BIONIC__) // glibc 2.19 thinks UTC DST is "UTC".
  t.tm_isdst = 1; // UTC has no DST.
  EXPECT_EQ(2U, strftime(buf, sizeof(buf), "<%Z>", &t));
  EXPECT_STREQ("<>", buf);
#endif
}

TEST(time, strftime_l) {
  locale_t cloc = newlocale(LC_ALL, "C.UTF-8", 0);
  locale_t old_locale = uselocale(cloc);

  setenv("TZ", "UTC", 1);

  struct tm t;
  memset(&t, 0, sizeof(tm));
  t.tm_year = 200;
  t.tm_mon = 2;
  t.tm_mday = 10;

  // Date and time as text.
  char buf[64];
  EXPECT_EQ(24U, strftime_l(buf, sizeof(buf), "%c", &t, cloc));
  EXPECT_STREQ("Sun Mar 10 00:00:00 2100", buf);

  uselocale(old_locale);
  freelocale(cloc);
}

TEST(time, strptime) {
  setenv("TZ", "UTC", 1);

  struct tm t;
  char buf[64];

  memset(&t, 0, sizeof(t));
  strptime("11:14", "%R", &t);
  strftime(buf, sizeof(buf), "%H:%M", &t);
  EXPECT_STREQ("11:14", buf);

  memset(&t, 0, sizeof(t));
  strptime("09:41:53", "%T", &t);
  strftime(buf, sizeof(buf), "%H:%M:%S", &t);
  EXPECT_STREQ("09:41:53", buf);
}

TEST(time, strptime_l) {
  setenv("TZ", "UTC", 1);

  struct tm t;
  char buf[64];

  memset(&t, 0, sizeof(t));
  strptime_l("11:14", "%R", &t, LC_GLOBAL_LOCALE);
  strftime_l(buf, sizeof(buf), "%H:%M", &t, LC_GLOBAL_LOCALE);
  EXPECT_STREQ("11:14", buf);

  memset(&t, 0, sizeof(t));
  strptime_l("09:41:53", "%T", &t, LC_GLOBAL_LOCALE);
  strftime_l(buf, sizeof(buf), "%H:%M:%S", &t, LC_GLOBAL_LOCALE);
  EXPECT_STREQ("09:41:53", buf);
}
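
// Helper for the timer tests below: arms `t` via timer_settime() with an
// initial (relative) expiration of value_s/value_ns and a repeat interval of
// interval_s/interval_ns. An all-zero initial expiration disarms the timer.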
void SetTime(timer_t t, time_t value_s, time_t value_ns, time_t interval_s, time_t interval_ns) {
  itimerspec ts;
  ts.it_value.tv_sec = value_s;
  ts.it_value.tv_nsec = value_ns;
  ts.it_interval.tv_sec = interval_s;
  ts.it_interval.tv_nsec = interval_ns;
  ASSERT_EQ(0, timer_settime(t, 0, &ts, NULL));
}
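
// A SIGEV_THREAD callback that deliberately does nothing; used by tests that
// only care about timer creation and deletion.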
static void NoOpNotifyFunction(sigval_t) {
}

TEST(time, timer_create) {
  sigevent_t se;
  memset(&se, 0, sizeof(se));
  se.sigev_notify = SIGEV_THREAD;
  se.sigev_notify_function = NoOpNotifyFunction;
  timer_t timer_id;
  ASSERT_EQ(0, timer_create(CLOCK_MONOTONIC, &se, &timer_id));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // Timers are not inherited by the child.
    ASSERT_EQ(-1, timer_delete(timer_id));
    ASSERT_EQ(EINVAL, errno);
    _exit(0);
  }

  AssertChildExited(pid, 0);

  ASSERT_EQ(0, timer_delete(timer_id));
}

static int timer_create_SIGEV_SIGNAL_signal_handler_invocation_count;
static void timer_create_SIGEV_SIGNAL_signal_handler(int signal_number) {
  ++timer_create_SIGEV_SIGNAL_signal_handler_invocation_count;
  ASSERT_EQ(SIGUSR1, signal_number);
}

TEST(time, timer_create_SIGEV_SIGNAL) {
  sigevent_t se;
  memset(&se, 0, sizeof(se));
  se.sigev_notify = SIGEV_SIGNAL;
  se.sigev_signo = SIGUSR1;

  timer_t timer_id;
  ASSERT_EQ(0, timer_create(CLOCK_MONOTONIC, &se, &timer_id));

  timer_create_SIGEV_SIGNAL_signal_handler_invocation_count = 0;
  ScopedSignalHandler ssh(SIGUSR1, timer_create_SIGEV_SIGNAL_signal_handler);

  ASSERT_EQ(0, timer_create_SIGEV_SIGNAL_signal_handler_invocation_count);

  itimerspec ts;
  ts.it_value.tv_sec = 0;
  ts.it_value.tv_nsec = 1;
  ts.it_interval.tv_sec = 0;
  ts.it_interval.tv_nsec = 0;
  ASSERT_EQ(0, timer_settime(timer_id, 0, &ts, NULL));

  usleep(500000);
  ASSERT_EQ(1, timer_create_SIGEV_SIGNAL_signal_handler_invocation_count);
}
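
// Owns a SIGEV_THREAD timer whose callback increments an atomic counter,
// letting tests observe how many times the timer has fired.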
struct Counter {
 private:
  std::atomic<int> value;
  timer_t timer_id;
  sigevent_t se;
  bool timer_valid;

  void Create() {
    ASSERT_FALSE(timer_valid);
    ASSERT_EQ(0, timer_create(CLOCK_REALTIME, &se, &timer_id));
    timer_valid = true;
  }

 public:
  explicit Counter(void (*fn)(sigval_t)) : value(0), timer_valid(false) {
    memset(&se, 0, sizeof(se));
    se.sigev_notify = SIGEV_THREAD;
    se.sigev_notify_function = fn;
    se.sigev_value.sival_ptr = this;
    Create();
  }

  void DeleteTimer() {
    ASSERT_TRUE(timer_valid);
    ASSERT_EQ(0, timer_delete(timer_id));
    timer_valid = false;
  }

  ~Counter() {
    if (timer_valid) {
      DeleteTimer();
    }
  }

  int Value() const {
    return value;
  }

  void SetTime(time_t value_s, time_t value_ns, time_t interval_s, time_t interval_ns) {
    ::SetTime(timer_id, value_s, value_ns, interval_s, interval_ns);
  }
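
  // Busy-waits for up to five seconds for the callback to change the counter;
  // returns true if it did.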
  bool ValueUpdated() {
    int current_value = value;
    time_t start = time(NULL);
    while (current_value == value && (time(NULL) - start) < 5) {
    }
    return current_value != value;
  }

  static void CountNotifyFunction(sigval_t value) {
    Counter* cd = reinterpret_cast<Counter*>(value.sival_ptr);
    ++cd->value;
  }

  static void CountAndDisarmNotifyFunction(sigval_t value) {
    Counter* cd = reinterpret_cast<Counter*>(value.sival_ptr);
    ++cd->value;

    // Setting the initial expiration time to 0 disarms the timer.
    cd->SetTime(0, 0, 1, 0);
  }
};

TEST(time, timer_settime_0) {
  Counter counter(Counter::CountAndDisarmNotifyFunction);
  ASSERT_EQ(0, counter.Value());

  counter.SetTime(0, 500000000, 1, 0);
  sleep(1);

  // The count should just be 1 because we disarmed the timer the first time it fired.
  ASSERT_EQ(1, counter.Value());
}

TEST(time, timer_settime_repeats) {
  Counter counter(Counter::CountNotifyFunction);
  ASSERT_EQ(0, counter.Value());

  counter.SetTime(0, 1, 0, 10);
  ASSERT_TRUE(counter.ValueUpdated());
  ASSERT_TRUE(counter.ValueUpdated());
  ASSERT_TRUE(counter.ValueUpdated());
  counter.DeleteTimer();
  // Add a sleep as other threads may be calling the callback function when the timer is deleted.
  usleep(500000);
}

static int timer_create_NULL_signal_handler_invocation_count;
static void timer_create_NULL_signal_handler(int signal_number) {
  ++timer_create_NULL_signal_handler_invocation_count;
  ASSERT_EQ(SIGALRM, signal_number);
}

TEST(time, timer_create_NULL) {
  // A NULL sigevent* is equivalent to asking for SIGEV_SIGNAL for SIGALRM.
  timer_t timer_id;
  ASSERT_EQ(0, timer_create(CLOCK_MONOTONIC, NULL, &timer_id));

  timer_create_NULL_signal_handler_invocation_count = 0;
  ScopedSignalHandler ssh(SIGALRM, timer_create_NULL_signal_handler);

  ASSERT_EQ(0, timer_create_NULL_signal_handler_invocation_count);

  SetTime(timer_id, 0, 1, 0, 0);
  usleep(500000);

  ASSERT_EQ(1, timer_create_NULL_signal_handler_invocation_count);
}

TEST(time, timer_create_EINVAL) {
  clockid_t invalid_clock = 16;

  // A SIGEV_SIGNAL timer is easy; the kernel does all that.
  timer_t timer_id;
  ASSERT_EQ(-1, timer_create(invalid_clock, NULL, &timer_id));
  ASSERT_EQ(EINVAL, errno);

  // A SIGEV_THREAD timer is more interesting because we have stuff to clean up.
  sigevent_t se;
  memset(&se, 0, sizeof(se));
  se.sigev_notify = SIGEV_THREAD;
  se.sigev_notify_function = NoOpNotifyFunction;
  ASSERT_EQ(-1, timer_create(invalid_clock, &se, &timer_id));
  ASSERT_EQ(EINVAL, errno);
}

TEST(time, timer_delete_multiple) {
  timer_t timer_id;
  ASSERT_EQ(0, timer_create(CLOCK_MONOTONIC, NULL, &timer_id));
  ASSERT_EQ(0, timer_delete(timer_id));
  ASSERT_EQ(-1, timer_delete(timer_id));
  ASSERT_EQ(EINVAL, errno);

  sigevent_t se;
  memset(&se, 0, sizeof(se));
  se.sigev_notify = SIGEV_THREAD;
  se.sigev_notify_function = NoOpNotifyFunction;
  ASSERT_EQ(0, timer_create(CLOCK_MONOTONIC, &se, &timer_id));
  ASSERT_EQ(0, timer_delete(timer_id));
  ASSERT_EQ(-1, timer_delete(timer_id));
  ASSERT_EQ(EINVAL, errno);
}

TEST(time, timer_create_multiple) {
  Counter counter1(Counter::CountNotifyFunction);
  Counter counter2(Counter::CountNotifyFunction);
  Counter counter3(Counter::CountNotifyFunction);

  ASSERT_EQ(0, counter1.Value());
  ASSERT_EQ(0, counter2.Value());
  ASSERT_EQ(0, counter3.Value());

  counter2.SetTime(0, 500000000, 0, 0);
  sleep(1);

  EXPECT_EQ(0, counter1.Value());
  EXPECT_EQ(1, counter2.Value());
  EXPECT_EQ(0, counter3.Value());
}

// Test to verify that disarming a repeatable timer disables the callbacks.
TEST(time, timer_disarm_terminates) {
  Counter counter(Counter::CountNotifyFunction);
  ASSERT_EQ(0, counter.Value());

  counter.SetTime(0, 1, 0, 1);
  ASSERT_TRUE(counter.ValueUpdated());
  ASSERT_TRUE(counter.ValueUpdated());
  ASSERT_TRUE(counter.ValueUpdated());

  counter.SetTime(0, 0, 0, 0);
  // Add a sleep as the kernel may have pending events when the timer is disarmed.
  usleep(500000);
  int value = counter.Value();
  usleep(500000);

  // Verify the counter has not been incremented.
  ASSERT_EQ(value, counter.Value());
}

// Test to verify that deleting a repeatable timer disables the callbacks.
TEST(time, timer_delete_terminates) {
  Counter counter(Counter::CountNotifyFunction);
  ASSERT_EQ(0, counter.Value());

  counter.SetTime(0, 1, 0, 1);
  ASSERT_TRUE(counter.ValueUpdated());
  ASSERT_TRUE(counter.ValueUpdated());
  ASSERT_TRUE(counter.ValueUpdated());

  counter.DeleteTimer();
  // Add a sleep as other threads may be calling the callback function when the timer is deleted.
  usleep(500000);
  int value = counter.Value();
  usleep(500000);

  // Verify the counter has not been incremented.
  ASSERT_EQ(value, counter.Value());
}
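
// State shared with TimerDeleteCallback, which records the tid of the thread
// it runs on, deletes its own timer, and then reports completion.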
struct TimerDeleteData {
  timer_t timer_id;
  pid_t tid;
  volatile bool complete;
};

static void TimerDeleteCallback(sigval_t value) {
  TimerDeleteData* tdd = reinterpret_cast<TimerDeleteData*>(value.sival_ptr);

  tdd->tid = gettid();
  timer_delete(tdd->timer_id);
  tdd->complete = true;
}

TEST(time, timer_delete_from_timer_thread) {
  TimerDeleteData tdd;
  sigevent_t se;

  memset(&se, 0, sizeof(se));
  se.sigev_notify = SIGEV_THREAD;
  se.sigev_notify_function = TimerDeleteCallback;
  se.sigev_value.sival_ptr = &tdd;

  tdd.complete = false;
  ASSERT_EQ(0, timer_create(CLOCK_REALTIME, &se, &tdd.timer_id));

  itimerspec ts;
  ts.it_value.tv_sec = 1;
  ts.it_value.tv_nsec = 0;
  ts.it_interval.tv_sec = 0;
  ts.it_interval.tv_nsec = 0;
  ASSERT_EQ(0, timer_settime(tdd.timer_id, 0, &ts, NULL));

  time_t cur_time = time(NULL);
  while (!tdd.complete && (time(NULL) - cur_time) < 5);
  ASSERT_TRUE(tdd.complete);

#if defined(__BIONIC__)
  // Since bionic timers are implemented by creating a thread to handle the
  // callback, verify that the thread actually completes.
  cur_time = time(NULL);
  while ((kill(tdd.tid, 0) != -1 || errno != ESRCH) && (time(NULL) - cur_time) < 5);
  ASSERT_EQ(-1, kill(tdd.tid, 0));
  ASSERT_EQ(ESRCH, errno);
#endif
}

TEST(time, clock_gettime) {
  // Try to ensure that our vdso clock_gettime is working.
  timespec ts1;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts1));
  timespec ts2;
  ASSERT_EQ(0, syscall(__NR_clock_gettime, CLOCK_MONOTONIC, &ts2));

  // What's the difference between the two?
  ts2.tv_sec -= ts1.tv_sec;
  ts2.tv_nsec -= ts1.tv_nsec;
  if (ts2.tv_nsec < 0) {
    --ts2.tv_sec;
    ts2.tv_nsec += NS_PER_S;
  }

  // Should be less than (a very generous, to try to avoid flakiness) 1000000ns.
  ASSERT_EQ(0, ts2.tv_sec);
  ASSERT_LT(ts2.tv_nsec, 1000000);
}
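
// Per-clock smoke tests for clock_gettime/clock_getres in the tests below,
// plus the EINVAL path for an unknown clock.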
TEST(time, clock_gettime_CLOCK_REALTIME) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
}

TEST(time, clock_gettime_CLOCK_MONOTONIC) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
}

TEST(time, clock_gettime_CLOCK_PROCESS_CPUTIME_ID) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts));
}

TEST(time, clock_gettime_CLOCK_THREAD_CPUTIME_ID) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts));
}

TEST(time, clock_gettime_CLOCK_BOOTTIME) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_BOOTTIME, &ts));
}

TEST(time, clock_gettime_unknown) {
  errno = 0;
  timespec ts;
  ASSERT_EQ(-1, clock_gettime(-1, &ts));
  ASSERT_EQ(EINVAL, errno);
}

TEST(time, clock_getres_CLOCK_REALTIME) {
  timespec ts;
  ASSERT_EQ(0, clock_getres(CLOCK_REALTIME, &ts));
  ASSERT_EQ(1, ts.tv_nsec);
  ASSERT_EQ(0, ts.tv_sec);
}

TEST(time, clock_getres_CLOCK_MONOTONIC) {
  timespec ts;
  ASSERT_EQ(0, clock_getres(CLOCK_MONOTONIC, &ts));
  ASSERT_EQ(1, ts.tv_nsec);
  ASSERT_EQ(0, ts.tv_sec);
}

TEST(time, clock_getres_CLOCK_PROCESS_CPUTIME_ID) {
  timespec ts;
  ASSERT_EQ(0, clock_getres(CLOCK_PROCESS_CPUTIME_ID, &ts));
}

TEST(time, clock_getres_CLOCK_THREAD_CPUTIME_ID) {
  timespec ts;
  ASSERT_EQ(0, clock_getres(CLOCK_THREAD_CPUTIME_ID, &ts));
}

TEST(time, clock_getres_CLOCK_BOOTTIME) {
  timespec ts;
  ASSERT_EQ(0, clock_getres(CLOCK_BOOTTIME, &ts));
  ASSERT_EQ(1, ts.tv_nsec);
  ASSERT_EQ(0, ts.tv_sec);
}

TEST(time, clock_getres_unknown) {
  errno = 0;
  timespec ts = { -1, -1 };
  ASSERT_EQ(-1, clock_getres(-1, &ts));
  ASSERT_EQ(EINVAL, errno);
  ASSERT_EQ(-1, ts.tv_nsec);
  ASSERT_EQ(-1, ts.tv_sec);
}

TEST(time, clock) {
  // clock(3) is hard to test, but a 1s sleep should cost less than 1ms.
  clock_t t0 = clock();
  sleep(1);
  clock_t t1 = clock();
  ASSERT_LT(t1 - t0, CLOCKS_PER_SEC / 1000);
}
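
// Returns a pid one past the kernel's pid_max, which can never name a running
// process.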
static pid_t GetInvalidPid() {
  std::unique_ptr<FILE, decltype(&fclose)> fp{fopen("/proc/sys/kernel/pid_max", "r"), fclose};
  long pid_max;
  fscanf(fp.get(), "%ld", &pid_max);
  return static_cast<pid_t>(pid_max + 1);
}

TEST(time, clock_getcpuclockid_current) {
  clockid_t clockid;
  ASSERT_EQ(0, clock_getcpuclockid(getpid(), &clockid));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(clockid, &ts));
}

TEST(time, clock_getcpuclockid_parent) {
  clockid_t clockid;
  ASSERT_EQ(0, clock_getcpuclockid(getppid(), &clockid));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(clockid, &ts));
}

TEST(time, clock_getcpuclockid_ESRCH) {
  // We can't use -1 for invalid pid here, because clock_getcpuclockid() can't detect it.
  errno = 0;
  // If this fails, your kernel needs commit e1b6b6ce to be backported.
  clockid_t clockid;
  ASSERT_EQ(ESRCH, clock_getcpuclockid(GetInvalidPid(), &clockid)) << "\n"
      << "Please ensure that the following kernel patches or their replacements have been applied:\n"
      << "* https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/"
      << "commit/?id=e1b6b6ce55a0a25c8aa8af019095253b2133a41a\n"
      << "* https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/"
      << "commit/?id=c80ed088a519da53f27b798a69748eaabc66aadf\n";
  ASSERT_EQ(0, errno);
}

TEST(time, clock_settime) {
  errno = 0;
  timespec ts;
  ASSERT_EQ(-1, clock_settime(-1, &ts));
  ASSERT_EQ(EINVAL, errno);
}

TEST(time, clock_nanosleep) {
  timespec in;
  timespec out;
  ASSERT_EQ(EINVAL, clock_nanosleep(-1, 0, &in, &out));
}

TEST(time, clock_nanosleep_thread_cputime_id) {
  timespec in;
  in.tv_sec = 1;
  in.tv_nsec = 0;
  ASSERT_EQ(EINVAL, clock_nanosleep(CLOCK_THREAD_CPUTIME_ID, 0, &in, nullptr));
}

TEST(time, bug_31938693) {
  // User-visible symptoms in N:
  // http://b/31938693
  // https://code.google.com/p/android/issues/detail?id=225132

  // Actual underlying bug (the code change, not the tzdata upgrade that first exposed the bug):
  // http://b/31848040

  // This isn't a great test, because very few time zones were actually affected, and there's
  // no real logic to which ones were affected: it was just a coincidence of the data that came
  // after them in the tzdata file.

  time_t t = 1475619727;
  struct tm tm;

  setenv("TZ", "America/Los_Angeles", 1);
  tzset();
  ASSERT_TRUE(localtime_r(&t, &tm) != nullptr);
  EXPECT_EQ(15, tm.tm_hour);

  setenv("TZ", "Europe/London", 1);
  tzset();
  ASSERT_TRUE(localtime_r(&t, &tm) != nullptr);
  EXPECT_EQ(23, tm.tm_hour);

  setenv("TZ", "America/Atka", 1);
  tzset();
  ASSERT_TRUE(localtime_r(&t, &tm) != nullptr);
  EXPECT_EQ(13, tm.tm_hour);

  setenv("TZ", "Pacific/Apia", 1);
  tzset();
  ASSERT_TRUE(localtime_r(&t, &tm) != nullptr);
  EXPECT_EQ(12, tm.tm_hour);

  setenv("TZ", "Pacific/Honolulu", 1);
  tzset();
  ASSERT_TRUE(localtime_r(&t, &tm) != nullptr);
  EXPECT_EQ(12, tm.tm_hour);

  setenv("TZ", "Asia/Magadan", 1);
  tzset();
  ASSERT_TRUE(localtime_r(&t, &tm) != nullptr);
  EXPECT_EQ(9, tm.tm_hour);
}

TEST(time, bug_31339449) {
  // POSIX says localtime acts as if it calls tzset.
  // tzset does two things:
  //  1. it sets the time zone ctime/localtime/mktime/strftime will use.
  //  2. it sets the global `tzname`.
  // POSIX says localtime_r need not set `tzname` (2).
  // Q: should localtime_r set the time zone (1)?
  // Upstream tzcode (and glibc) answer "no", everyone else answers "yes".

  // Pick a time, any time...
  time_t t = 1475619727;

  // Call tzset with a specific timezone.
  setenv("TZ", "America/Atka", 1);
  tzset();

  // If we change the timezone and call localtime, localtime should use the new timezone.
  setenv("TZ", "America/Los_Angeles", 1);
  struct tm* tm_p = localtime(&t);
  EXPECT_EQ(15, tm_p->tm_hour);

  // Reset the timezone back.
  setenv("TZ", "America/Atka", 1);
  tzset();

#if defined(__BIONIC__)
  // If we change the timezone again and call localtime_r, localtime_r should use the new timezone.
  setenv("TZ", "America/Los_Angeles", 1);
  struct tm tm = {};
  localtime_r(&t, &tm);
  EXPECT_EQ(15, tm.tm_hour);
#else
  // The BSDs agree with us, but glibc gets this wrong.
#endif
}
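
// A zero-initialized struct tm formats as "Sun Jan  0 00:00:00 1900": tm_wday 0
// is Sunday, tm_mon 0 is January, tm_year counts from 1900, and asctime() pads
// tm_mday to a field width of three (hence the two spaces before the 0).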
TEST(time, asctime) {
  const struct tm tm = {};
  ASSERT_STREQ("Sun Jan  0 00:00:00 1900\n", asctime(&tm));
}

TEST(time, asctime_r) {
  const struct tm tm = {};
  char buf[256];
  ASSERT_EQ(buf, asctime_r(&tm, buf));
  ASSERT_STREQ("Sun Jan  0 00:00:00 1900\n", buf);
}

TEST(time, ctime) {
  setenv("TZ", "UTC", 1);
  const time_t t = 0;
  ASSERT_STREQ("Thu Jan  1 00:00:00 1970\n", ctime(&t));
}

TEST(time, ctime_r) {
  setenv("TZ", "UTC", 1);
  const time_t t = 0;
  char buf[256];
  ASSERT_EQ(buf, ctime_r(&t, buf));
  ASSERT_STREQ("Thu Jan  1 00:00:00 1970\n", buf);
}