adb: extract soon-to-be-common code.

As a side effect, delete FDE_ACTIVE, which was always set on every
fdevent, and FDE_PENDING, which was an internal implementation detail.

This patch removes spin detection, which will be reimplemented
separately later.

Test: adb_test on host
Test: adbd_test on blueline
Change-Id: I40be3504ce03c4fae5e071fa018542a051b7511d
Author: Josh Gao
Date:   2019-07-11 16:35:37 -07:00
parent 0fe1b4bd8c
commit e22cb9f306
4 changed files with 176 additions and 285 deletions
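
After this change, fdevent_context owns the installed_fdevents_ map and supplies CalculatePollDuration(), HandleEvents(), and FlushRunQueue(); a backend only needs to implement Set(), Loop(), and Interrupt(), with Register()/Unregister() now optional no-op hooks. A rough sketch of a backend built on the extracted helpers (illustrative only, not code from this change; the class name and the wait_for_io() primitive are placeholders):

struct fdevent_context_example : public fdevent_context {
    virtual void Set(fdevent* fde, unsigned events) final {
        fde->state = events;  // remember which FDE_* events the caller wants
    }

    virtual void Loop() final {
        main_thread_id_ = android::base::GetThreadId();
        while (!terminate_loop_) {
            // Block for at most the nearest fdevent timeout (nullopt = forever).
            std::optional<std::chrono::milliseconds> wait = CalculatePollDuration();

            // Backend-specific primitive (hypothetical): wait for readiness on the
            // installed fdevents and return (fdevent*, FDE_* mask) pairs.
            std::vector<fdevent_event> ready = wait_for_io(wait);

            // Shared code invokes the callbacks, then drains the Run() queue.
            HandleEvents(ready);
        }
        main_thread_id_.reset();
    }

    virtual void Interrupt() final { /* wake up wait_for_io() */ }

  private:
    // Placeholder for whatever wait mechanism the backend uses (poll, epoll, ...).
    std::vector<fdevent_event> wait_for_io(std::optional<std::chrono::milliseconds> timeout);
    // (remaining virtual members of fdevent_context omitted for brevity)
};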

fdevent.cpp

@@ -28,14 +28,21 @@
#include "fdevent.h"
#include "fdevent_poll.h"
using namespace std::chrono_literals;
using std::chrono::duration_cast;
void invoke_fde(struct fdevent* fde, unsigned events) {
if (auto f = std::get_if<fd_func>(&fde->func)) {
(*f)(fde->fd.get(), events, fde->arg);
} else if (auto f = std::get_if<fd_func2>(&fde->func)) {
(*f)(fde, events, fde->arg);
} else {
__builtin_unreachable();
}
}
std::string dump_fde(const fdevent* fde) {
std::string state;
if (fde->state & FDE_ACTIVE) {
state += "A";
}
if (fde->state & FDE_PENDING) {
state += "P";
}
if (fde->state & FDE_READ) {
state += "R";
}
@@ -53,9 +60,11 @@ fdevent* fdevent_context::Create(unique_fd fd, std::variant<fd_func, fd_func2> f
CheckMainThread();
CHECK_GE(fd.get(), 0);
int fd_num = fd.get();
fdevent* fde = new fdevent();
fde->id = fdevent_id_++;
fde->state = FDE_ACTIVE;
fde->state = 0;
fde->fd = std::move(fd);
fde->func = func;
fde->arg = arg;
@@ -66,6 +75,10 @@ fdevent* fdevent_context::Create(unique_fd fd, std::variant<fd_func, fd_func2> f
LOG(ERROR) << "failed to set non-blocking mode for fd " << fde->fd.get();
}
auto [it, inserted] = this->installed_fdevents_.emplace(fd_num, fde);
CHECK(inserted);
UNUSED(it);
this->Register(fde);
return fde;
}
@@ -78,18 +91,22 @@ unique_fd fdevent_context::Destroy(fdevent* fde) {
this->Unregister(fde);
auto erased = this->installed_fdevents_.erase(fde->fd.get());
CHECK_EQ(1UL, erased);
unique_fd result = std::move(fde->fd);
delete fde;
return result;
}
void fdevent_context::Add(fdevent* fde, unsigned events) {
Set(fde, (fde->state & FDE_EVENTMASK) | events);
CHECK(!(events & FDE_TIMEOUT));
Set(fde, fde->state | events);
}
void fdevent_context::Del(fdevent* fde, unsigned events) {
CHECK(!(events & FDE_TIMEOUT));
Set(fde, (fde->state & FDE_EVENTMASK) & ~events);
Set(fde, fde->state & ~events);
}
void fdevent_context::SetTimeout(fdevent* fde, std::optional<std::chrono::milliseconds> timeout) {
@@ -98,6 +115,56 @@ void fdevent_context::SetTimeout(fdevent* fde, std::optional<std::chrono::millis
fde->last_active = std::chrono::steady_clock::now();
}
std::optional<std::chrono::milliseconds> fdevent_context::CalculatePollDuration() {
std::optional<std::chrono::milliseconds> result = std::nullopt;
auto now = std::chrono::steady_clock::now();
CheckMainThread();
for (const auto& [fd, fde] : this->installed_fdevents_) {
UNUSED(fd);
auto timeout_opt = fde->timeout;
if (timeout_opt) {
auto deadline = fde->last_active + *timeout_opt;
auto time_left = duration_cast<std::chrono::milliseconds>(deadline - now);
if (time_left < 0ms) {
time_left = 0ms;
}
if (!result) {
result = time_left;
} else {
result = std::min(*result, time_left);
}
}
}
return result;
}
void fdevent_context::HandleEvents(const std::vector<fdevent_event>& events) {
for (const auto& event : events) {
invoke_fde(event.fde, event.events);
}
FlushRunQueue();
}
void fdevent_context::FlushRunQueue() {
// We need to be careful around reentrancy here, since a function we call can queue up another
// function.
while (true) {
std::function<void()> fn;
{
std::lock_guard<std::mutex> lock(this->run_queue_mutex_);
if (this->run_queue_.empty()) {
break;
}
fn = std::move(this->run_queue_.front());
this->run_queue_.pop_front();
}
fn();
}
}
void fdevent_context::CheckMainThread() {
if (main_thread_id_) {
CHECK_EQ(*main_thread_id_, android::base::GetThreadId());
@@ -118,23 +185,6 @@ void fdevent_context::TerminateLoop() {
Interrupt();
}
void fdevent_context::FlushRunQueue() {
// We need to be careful around reentrancy here, since a function we call can queue up another
// function.
while (true) {
std::function<void()> fn;
{
std::lock_guard<std::mutex> lock(this->run_queue_mutex_);
if (this->run_queue_.empty()) {
break;
}
fn = this->run_queue_.front();
this->run_queue_.pop_front();
}
fn();
}
}
static auto& g_ambient_fdevent_context =
*new std::unique_ptr<fdevent_context>(new fdevent_context_poll());
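
As a usage note for the Create()/Add()/SetTimeout() calls above: callbacks come in two flavors, fd_func (raw fd) and fd_func2 (the fdevent itself), and invoke_fde() dispatches to whichever one was registered. A hypothetical registration, assuming the declarations from fdevent.h (the callback and helper names below are illustrative, not adb code):

// fd_func flavor: receives the raw fd.
static void echo_cb(int fd, unsigned events, void* /*userdata*/) {
    if (events & FDE_READ) {
        // ... read from fd ...
    }
}

// fd_func2 flavor: receives the fdevent, which is convenient for self-destruction.
static void oneshot_cb(fdevent* fde, unsigned events, void* userdata) {
    auto* ctx = static_cast<fdevent_context*>(userdata);
    if (events & (FDE_READ | FDE_ERROR | FDE_TIMEOUT)) {
        ctx->Destroy(fde);  // discarding the returned unique_fd closes the fd
    }
}

// Must run on the context's main thread (Create() checks this).
static void install(fdevent_context* ctx, unique_fd sock) {
    fdevent* fde = ctx->Create(std::move(sock), oneshot_cb, ctx);
    ctx->Add(fde, FDE_READ);                          // notify on readability
    ctx->SetTimeout(fde, std::chrono::seconds(30));   // FDE_TIMEOUT after 30s of inactivity
}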

fdevent.h

@@ -26,6 +26,7 @@
#include <functional>
#include <mutex>
#include <optional>
#include <unordered_map>
#include <variant>
#include <android-base/thread_annotations.h>
@@ -38,19 +39,19 @@
#define FDE_ERROR 0x0004
#define FDE_TIMEOUT 0x0008
// Internal states.
#define FDE_EVENTMASK 0x00ff
#define FDE_STATEMASK 0xff00
#define FDE_ACTIVE 0x0100
#define FDE_PENDING 0x0200
struct fdevent;
typedef void (*fd_func)(int fd, unsigned events, void *userdata);
typedef void (*fd_func2)(struct fdevent* fde, unsigned events, void* userdata);
struct fdevent;
void invoke_fde(struct fdevent* fde, unsigned events);
std::string dump_fde(const fdevent* fde);
struct fdevent_event {
fdevent* fde;
unsigned events;
};
struct fdevent_context {
public:
virtual ~fdevent_context() = default;
@@ -62,11 +63,8 @@ struct fdevent_context {
unique_fd Destroy(fdevent* fde);
protected:
// Register an fdevent that is being created by Create with the fdevent_context.
virtual void Register(fdevent* fde) = 0;
// Unregister an fdevent that is being destroyed by Destroy with the fdevent_context.
virtual void Unregister(fdevent* fde) = 0;
virtual void Register(fdevent*) {}
virtual void Unregister(fdevent*) {}
public:
// Change which events should cause notifications.
@@ -80,6 +78,15 @@ struct fdevent_context {
// trigger repeatedly every |timeout| ms.
void SetTimeout(fdevent* fde, std::optional<std::chrono::milliseconds> timeout);
protected:
std::optional<std::chrono::milliseconds> CalculatePollDuration();
void HandleEvents(const std::vector<fdevent_event>& events);
private:
// Run all pending functions enqueued via Run().
void FlushRunQueue() EXCLUDES(run_queue_mutex_);
public:
// Loop until TerminateLoop is called, handling events.
// Implementations should call FlushRunQueue on every iteration, and check the value of
// terminate_loop_ to determine whether to stop.
@@ -100,12 +107,12 @@ struct fdevent_context {
// Interrupt the run loop.
virtual void Interrupt() = 0;
// Run all pending functions enqueued via Run().
void FlushRunQueue() EXCLUDES(run_queue_mutex_);
std::optional<uint64_t> main_thread_id_ = std::nullopt;
std::atomic<bool> terminate_loop_ = false;
protected:
std::unordered_map<int, fdevent*> installed_fdevents_;
private:
uint64_t fdevent_id_ = 0;
std::mutex run_queue_mutex_;
@@ -119,7 +126,6 @@ struct fdevent {
int force_eof = 0;
uint16_t state = 0;
uint16_t events = 0;
std::optional<std::chrono::milliseconds> timeout;
std::chrono::steady_clock::time_point last_active;
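
Since FDE_ACTIVE and FDE_PENDING are gone (along with FDE_EVENTMASK/FDE_STATEMASK), fde->state now holds nothing but the requested FDE_* event bits, so Add() and Del() reduce to plain bitwise updates. Spelled out (illustrative, mirroring fdevent_context::Add/Del above):

unsigned state = 0;        // state after Create()
state |= FDE_READ;         // Add(fde, FDE_READ)
state |= FDE_WRITE;        // Add(fde, FDE_WRITE): read + write
state &= ~FDE_READ;        // Del(fde, FDE_READ):  write only
// FDE_TIMEOUT is never stored in state; it is only ever reported to callbacks,
// which is why Add()/Del() CHECK(!(events & FDE_TIMEOUT)).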

fdevent_poll.cpp

@@ -78,57 +78,10 @@ fdevent_context_poll::~fdevent_context_poll() {
this->Destroy(this->interrupt_fde_);
}
void fdevent_context_poll::Register(fdevent* fde) {
auto pair = poll_node_map_.emplace(fde->fd.get(), PollNode(fde));
CHECK(pair.second) << "install existing fd " << fde->fd.get();
}
void fdevent_context_poll::Unregister(fdevent* fde) {
if (fde->state & FDE_ACTIVE) {
poll_node_map_.erase(fde->fd.get());
if (fde->state & FDE_PENDING) {
pending_list_.remove(fde);
}
fde->state = 0;
fde->events = 0;
}
}
void fdevent_context_poll::Set(fdevent* fde, unsigned events) {
CheckMainThread();
events &= FDE_EVENTMASK;
if ((fde->state & FDE_EVENTMASK) == events) {
return;
}
CHECK(fde->state & FDE_ACTIVE);
auto it = poll_node_map_.find(fde->fd.get());
CHECK(it != poll_node_map_.end());
PollNode& node = it->second;
if (events & FDE_READ) {
node.pollfd.events |= POLLIN;
} else {
node.pollfd.events &= ~POLLIN;
}
if (events & FDE_WRITE) {
node.pollfd.events |= POLLOUT;
} else {
node.pollfd.events &= ~POLLOUT;
}
fde->state = (fde->state & FDE_STATEMASK) | events;
fde->state = events;
D("fdevent_set: %s, events = %u", dump_fde(fde).c_str(), events);
if (fde->state & FDE_PENDING) {
// If we are pending, make sure we don't signal an event that is no longer wanted.
fde->events &= events;
if (fde->events == 0) {
pending_list_.remove(fde);
fde->state &= ~FDE_PENDING;
}
}
}
static std::string dump_pollfds(const std::vector<adb_pollfd>& pollfds) {
@@ -146,204 +99,94 @@ static std::string dump_pollfds(const std::vector<adb_pollfd>& pollfds) {
return result;
}
static std::optional<std::chrono::milliseconds> calculate_timeout(fdevent_context_poll* ctx) {
std::optional<std::chrono::milliseconds> result = std::nullopt;
auto now = std::chrono::steady_clock::now();
ctx->CheckMainThread();
for (const auto& [fd, pollnode] : ctx->poll_node_map_) {
UNUSED(fd);
auto timeout_opt = pollnode.fde->timeout;
if (timeout_opt) {
auto deadline = pollnode.fde->last_active + *timeout_opt;
auto time_left = std::chrono::duration_cast<std::chrono::milliseconds>(deadline - now);
if (time_left < std::chrono::milliseconds::zero()) {
time_left = std::chrono::milliseconds::zero();
}
if (!result) {
result = time_left;
} else {
result = std::min(*result, time_left);
}
}
}
return result;
}
static void fdevent_process(fdevent_context_poll* ctx) {
std::vector<adb_pollfd> pollfds;
for (const auto& pair : ctx->poll_node_map_) {
pollfds.push_back(pair.second.pollfd);
}
CHECK_GT(pollfds.size(), 0u);
D("poll(), pollfds = %s", dump_pollfds(pollfds).c_str());
auto timeout = calculate_timeout(ctx);
int timeout_ms;
if (!timeout) {
timeout_ms = -1;
} else {
timeout_ms = timeout->count();
}
int ret = adb_poll(&pollfds[0], pollfds.size(), timeout_ms);
if (ret == -1) {
PLOG(ERROR) << "poll(), ret = " << ret;
return;
}
auto post_poll = std::chrono::steady_clock::now();
for (const auto& pollfd : pollfds) {
if (pollfd.revents != 0) {
D("for fd %d, revents = %x", pollfd.fd, pollfd.revents);
}
unsigned events = 0;
if (pollfd.revents & POLLIN) {
events |= FDE_READ;
}
if (pollfd.revents & POLLOUT) {
events |= FDE_WRITE;
}
if (pollfd.revents & (POLLERR | POLLHUP | POLLNVAL)) {
// We fake a read, as the rest of the code assumes that errors will
// be detected at that point.
events |= FDE_READ | FDE_ERROR;
}
#if defined(__linux__)
if (pollfd.revents & POLLRDHUP) {
events |= FDE_READ | FDE_ERROR;
}
#endif
auto it = ctx->poll_node_map_.find(pollfd.fd);
CHECK(it != ctx->poll_node_map_.end());
fdevent* fde = it->second.fde;
if (events == 0) {
// Check for timeout.
if (fde->timeout) {
auto deadline = fde->last_active + *fde->timeout;
if (deadline < post_poll) {
events |= FDE_TIMEOUT;
}
}
}
if (events != 0) {
CHECK_EQ(fde->fd.get(), pollfd.fd);
fde->events |= events;
fde->last_active = post_poll;
D("%s got events %x", dump_fde(fde).c_str(), events);
fde->state |= FDE_PENDING;
ctx->pending_list_.push_back(fde);
}
}
}
template <class T>
struct always_false : std::false_type {};
static void fdevent_call_fdfunc(fdevent* fde) {
unsigned events = fde->events;
fde->events = 0;
CHECK(fde->state & FDE_PENDING);
fde->state &= (~FDE_PENDING);
D("fdevent_call_fdfunc %s", dump_fde(fde).c_str());
std::visit(
[&](auto&& f) {
using F = std::decay_t<decltype(f)>;
if constexpr (std::is_same_v<fd_func, F>) {
f(fde->fd.get(), events, fde->arg);
} else if constexpr (std::is_same_v<fd_func2, F>) {
f(fde, events, fde->arg);
} else {
static_assert(always_false<F>::value, "non-exhaustive visitor");
}
},
fde->func);
}
static void fdevent_check_spin(fdevent_context_poll* ctx, uint64_t cycle) {
// Check to see if we're spinning because we forgot about an fdevent
// by keeping track of how long fdevents have been continuously pending.
struct SpinCheck {
fdevent* fde;
android::base::boot_clock::time_point timestamp;
uint64_t cycle;
};
// TODO: Move this into the base fdevent_context.
static auto& g_continuously_pending = *new std::unordered_map<uint64_t, SpinCheck>();
static auto last_cycle = android::base::boot_clock::now();
auto now = android::base::boot_clock::now();
if (now - last_cycle > 10ms) {
// We're not spinning.
g_continuously_pending.clear();
last_cycle = now;
return;
}
last_cycle = now;
for (auto* fde : ctx->pending_list_) {
auto it = g_continuously_pending.find(fde->id);
if (it == g_continuously_pending.end()) {
g_continuously_pending[fde->id] =
SpinCheck{.fde = fde, .timestamp = now, .cycle = cycle};
} else {
it->second.cycle = cycle;
}
}
for (auto it = g_continuously_pending.begin(); it != g_continuously_pending.end();) {
if (it->second.cycle != cycle) {
it = g_continuously_pending.erase(it);
} else {
// Use an absurdly long window, since all we really care about is
// getting a bugreport eventually.
if (now - it->second.timestamp > 300s) {
LOG(FATAL_WITHOUT_ABORT)
<< "detected spin in fdevent: " << dump_fde(it->second.fde);
#if defined(__linux__)
int fd = it->second.fde->fd.get();
std::string fd_path = android::base::StringPrintf("/proc/self/fd/%d", fd);
std::string path;
if (!android::base::Readlink(fd_path, &path)) {
PLOG(FATAL_WITHOUT_ABORT) << "readlink of fd " << fd << " failed";
}
LOG(FATAL_WITHOUT_ABORT) << "fd " << fd << " = " << path;
#endif
abort();
}
++it;
}
}
}
void fdevent_context_poll::Loop() {
main_thread_id_ = android::base::GetThreadId();
uint64_t cycle = 0;
while (true) {
if (terminate_loop_) {
break;
}
D("--- --- waiting for events");
std::vector<adb_pollfd> pollfds;
for (const auto& [fd, fde] : this->installed_fdevents_) {
adb_pollfd pfd;
pfd.fd = fd;
pfd.events = 0;
if (fde->state & FDE_READ) {
pfd.events |= POLLIN;
}
if (fde->state & FDE_WRITE) {
pfd.events |= POLLOUT;
}
if (fde->state & FDE_ERROR) {
pfd.events |= POLLERR;
}
#if defined(__linux__)
pfd.events |= POLLRDHUP;
#endif
pfd.revents = 0;
pollfds.push_back(pfd);
}
CHECK_GT(pollfds.size(), 0u);
D("poll(), pollfds = %s", dump_pollfds(pollfds).c_str());
fdevent_process(this);
fdevent_check_spin(this, cycle++);
while (!pending_list_.empty()) {
fdevent* fde = pending_list_.front();
pending_list_.pop_front();
fdevent_call_fdfunc(fde);
auto timeout = CalculatePollDuration();
int timeout_ms;
if (!timeout) {
timeout_ms = -1;
} else {
timeout_ms = timeout->count();
}
this->FlushRunQueue();
int ret = adb_poll(pollfds.data(), pollfds.size(), timeout_ms);
if (ret == -1) {
PLOG(ERROR) << "poll(), ret = " << ret;
return;
}
auto post_poll = std::chrono::steady_clock::now();
std::vector<fdevent_event> poll_events;
for (const auto& pollfd : pollfds) {
unsigned events = 0;
if (pollfd.revents & POLLIN) {
events |= FDE_READ;
}
if (pollfd.revents & POLLOUT) {
events |= FDE_WRITE;
}
if (pollfd.revents & (POLLERR | POLLHUP | POLLNVAL)) {
// We fake a read, as the rest of the code assumes that errors will
// be detected at that point.
events |= FDE_READ | FDE_ERROR;
}
#if defined(__linux__)
if (pollfd.revents & POLLRDHUP) {
events |= FDE_READ | FDE_ERROR;
}
#endif
auto it = this->installed_fdevents_.find(pollfd.fd);
CHECK(it != this->installed_fdevents_.end());
fdevent* fde = it->second;
if (events == 0) {
if (fde->timeout) {
auto deadline = fde->last_active + *fde->timeout;
if (deadline < post_poll) {
events |= FDE_TIMEOUT;
}
}
}
if (events != 0) {
D("%s got events %x", dump_fde(fde).c_str(), events);
poll_events.push_back({fde, events});
fde->last_active = post_poll;
}
}
this->HandleEvents(std::move(poll_events));
}
main_thread_id_.reset();
@@ -351,7 +194,7 @@ void fdevent_context_poll::Loop() {
size_t fdevent_context_poll::InstalledCount() {
// We always have an installed fde for interrupt.
return poll_node_map_.size() - 1;
return this->installed_fdevents_.size() - 1;
}
void fdevent_context_poll::Interrupt() {

fdevent_poll.h

@@ -48,9 +48,6 @@ struct fdevent_context_poll : public fdevent_context {
fdevent_context_poll();
virtual ~fdevent_context_poll();
virtual void Register(fdevent* fde) final;
virtual void Unregister(fdevent* fde) final;
virtual void Set(fdevent* fde, unsigned events) final;
virtual void Loop() final;
@@ -61,11 +58,6 @@ struct fdevent_context_poll : public fdevent_context {
virtual void Interrupt() final;
public:
// All operations to fdevent should happen only in the main thread.
// That's why we don't need a lock for fdevent.
std::unordered_map<int, PollNode> poll_node_map_;
std::list<fdevent*> pending_list_;
unique_fd interrupt_fd_;
fdevent* interrupt_fde_ = nullptr;
};
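
Taken together with fdevent_poll.cpp above, dropping the PollNode bookkeeping (poll_node_map_, pending_list_) means ready fdevents are no longer staged on the context behind FDE_PENDING; each Loop() iteration collects them into a local vector and hands it to the shared HandleEvents(). In outline (a paraphrase of the before/after shown in the diff, not literal patch lines):

// Before: results staged on the context, drained later in the same iteration.
fde->events |= events;
fde->state |= FDE_PENDING;
pending_list_.push_back(fde);
// ...
while (!pending_list_.empty()) {
    fdevent* fde = pending_list_.front();
    pending_list_.pop_front();
    fdevent_call_fdfunc(fde);
}

// After: results are local to one Loop() iteration.
std::vector<fdevent_event> poll_events;
poll_events.push_back({fde, events});
HandleEvents(std::move(poll_events));  // invokes callbacks, then FlushRunQueue()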