2016-10-19 03:17:52 +02:00
|
|
|
/*
|
|
|
|
* Copyright 2016, The Android Open Source Project
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <array>
#include <deque>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>

#include <event2/event.h>
#include <event2/listener.h>
#include <event2/thread.h>

#include <android-base/cmsg.h>
#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/stringprintf.h>
#include <android-base/unique_fd.h>
#include <cutils/sockets.h>

#include "debuggerd/handler.h"
#include "dump_type.h"
#include "protocol.h"
#include "util.h"

#include "intercept_manager.h"
2017-06-26 22:54:05 +02:00
|
|
|
using android::base::GetIntProperty;
using android::base::SendFileDescriptors;
using android::base::StringPrintf;
using android::base::borrowed_fd;
using android::base::unique_fd;

// Registry of processes that want to intercept dump output for a given
// pid/type. Allocated once in main() and intentionally never freed (daemon
// lifetime); consulted in perform_request() to redirect dump output.
static InterceptManager* intercept_manager;
|
|
|
|
|
|
|
|
// Lifecycle states for a crash dump request.
// NOTE(review): these enumerators are not referenced anywhere in this file —
// presumably used elsewhere or vestigial; confirm before removing.
enum CrashStatus {
  kCrashStatusRunning,
  kCrashStatusQueued,
};
|
|
|
|
|
2021-01-27 00:53:11 +01:00
|
|
|
struct CrashArtifact {
|
|
|
|
unique_fd fd;
|
|
|
|
|
|
|
|
static CrashArtifact devnull() {
|
|
|
|
CrashArtifact result;
|
|
|
|
result.fd.reset(open("/dev/null", O_WRONLY | O_CLOEXEC));
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Filenames (relative to a CrashQueue's directory) that a completed dump's
// temporary files get linked to.
struct CrashArtifactPaths {
  // Name for the text tombstone/trace file.
  std::string text;
  // Name for the protobuf tombstone; only set when the queue supports proto.
  std::optional<std::string> proto;
};
|
|
|
|
|
|
|
|
// The open output file(s) a dump is written into before being linked to its
// final path on completion (see crash_completed).
struct CrashOutput {
  // Text output (tombstone or trace).
  CrashArtifact text;
  // Protobuf output; only present for proto-capable queues/dump types.
  std::optional<CrashArtifact> proto;
};
|
|
|
|
|
2017-06-26 22:54:05 +02:00
|
|
|
// Ownership of Crash is a bit messy.
// It's either owned by an active event that must have a timeout, or owned by
// queued_requests, in the case that multiple crashes come in at the same time.
struct Crash {
  // Frees the libevent event. NOTE(review): crash_event defaults to nullptr;
  // in practice it is assigned immediately after construction in
  // crash_accept_cb — confirm no path destroys a Crash before that.
  ~Crash() { event_free(crash_event); }

  // Output file(s) the dump is written into.
  CrashOutput output;
  // Socket connection speaking the TombstonedCrashPacket protocol.
  unique_fd crash_socket_fd;
  // PID being dumped. For java backtraces this comes from SO_PEERCRED, not
  // from the (untrusted) request packet — see crash_request_cb.
  pid_t crash_pid;
  // libevent event that drives this crash through request -> completion.
  event* crash_event = nullptr;

  // Which kind of dump was requested.
  DebuggerdDumpType crash_type;
};
|
2017-05-15 16:59:30 +02:00
|
|
|
|
2017-05-24 16:07:25 +02:00
|
|
|
class CrashQueue {
|
2017-05-15 16:59:30 +02:00
|
|
|
public:
|
2017-05-24 16:07:25 +02:00
|
|
|
CrashQueue(const std::string& dir_path, const std::string& file_name_prefix, size_t max_artifacts,
|
2024-03-19 01:15:57 +01:00
|
|
|
size_t max_concurrent_dumps, bool supports_proto, bool world_readable)
|
2017-05-15 16:59:30 +02:00
|
|
|
: file_name_prefix_(file_name_prefix),
|
|
|
|
dir_path_(dir_path),
|
|
|
|
dir_fd_(open(dir_path.c_str(), O_DIRECTORY | O_RDONLY | O_CLOEXEC)),
|
|
|
|
max_artifacts_(max_artifacts),
|
|
|
|
next_artifact_(0),
|
|
|
|
max_concurrent_dumps_(max_concurrent_dumps),
|
2021-01-27 00:53:11 +01:00
|
|
|
num_concurrent_dumps_(0),
|
2024-03-19 01:15:57 +01:00
|
|
|
supports_proto_(supports_proto),
|
|
|
|
world_readable_(world_readable) {
|
2017-05-15 16:59:30 +02:00
|
|
|
if (dir_fd_ == -1) {
|
|
|
|
PLOG(FATAL) << "failed to open directory: " << dir_path;
|
|
|
|
}
|
|
|
|
|
|
|
|
// NOTE: If max_artifacts_ <= max_concurrent_dumps_, then theoretically the
|
|
|
|
// same filename could be handed out to multiple processes.
|
|
|
|
CHECK(max_artifacts_ > max_concurrent_dumps_);
|
|
|
|
|
|
|
|
find_oldest_artifact();
|
2016-10-19 03:17:52 +02:00
|
|
|
}
|
|
|
|
|
2017-06-26 22:54:05 +02:00
|
|
|
static CrashQueue* for_crash(const Crash* crash) {
|
|
|
|
return (crash->crash_type == kDebuggerdJavaBacktrace) ? for_anrs() : for_tombstones();
|
|
|
|
}
|
|
|
|
|
2021-01-27 00:53:11 +01:00
|
|
|
static CrashQueue* for_crash(const std::unique_ptr<Crash>& crash) {
|
|
|
|
return for_crash(crash.get());
|
|
|
|
}
|
|
|
|
|
2017-06-26 22:54:05 +02:00
|
|
|
static CrashQueue* for_tombstones() {
|
|
|
|
static CrashQueue queue("/data/tombstones", "tombstone_" /* file_name_prefix */,
|
2019-09-26 23:35:24 +02:00
|
|
|
GetIntProperty("tombstoned.max_tombstone_count", 32),
|
2024-03-19 01:15:57 +01:00
|
|
|
1 /* max_concurrent_dumps */, true /* supports_proto */,
|
|
|
|
true /* world_readable */);
|
2017-06-26 22:54:05 +02:00
|
|
|
return &queue;
|
|
|
|
}
|
|
|
|
|
|
|
|
static CrashQueue* for_anrs() {
|
|
|
|
static CrashQueue queue("/data/anr", "trace_" /* file_name_prefix */,
|
|
|
|
GetIntProperty("tombstoned.max_anr_count", 64),
|
2024-03-19 01:15:57 +01:00
|
|
|
4 /* max_concurrent_dumps */, false /* supports_proto */,
|
|
|
|
false /* world_readable */);
|
2017-06-26 22:54:05 +02:00
|
|
|
return &queue;
|
|
|
|
}
|
|
|
|
|
2021-01-27 00:53:11 +01:00
|
|
|
CrashArtifact create_temporary_file() const {
|
|
|
|
CrashArtifact result;
|
|
|
|
|
|
|
|
std::optional<std::string> path;
|
2021-02-02 01:48:25 +01:00
|
|
|
result.fd.reset(openat(dir_fd_, ".", O_WRONLY | O_APPEND | O_TMPFILE | O_CLOEXEC, 0660));
|
2021-01-27 00:53:11 +01:00
|
|
|
if (result.fd == -1) {
|
2024-02-26 23:18:21 +01:00
|
|
|
PLOG(FATAL) << "failed to create temporary tombstone in " << dir_path_;
|
2021-01-27 00:53:11 +01:00
|
|
|
}
|
2024-02-26 23:18:21 +01:00
|
|
|
|
2024-03-19 01:15:57 +01:00
|
|
|
if (world_readable_) {
|
|
|
|
// We need to fchmodat after creating to avoid getting the umask applied.
|
|
|
|
std::string fd_path = StringPrintf("/proc/self/fd/%d", result.fd.get());
|
|
|
|
if (fchmodat(dir_fd_, fd_path.c_str(), 0664, 0) != 0) {
|
|
|
|
PLOG(ERROR) << "Failed to make tombstone world-readable";
|
|
|
|
}
|
2024-02-23 21:54:27 +01:00
|
|
|
}
|
2021-01-27 00:53:11 +01:00
|
|
|
|
|
|
|
return std::move(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
std::optional<CrashOutput> get_output(DebuggerdDumpType dump_type) {
|
|
|
|
CrashOutput result;
|
|
|
|
|
|
|
|
switch (dump_type) {
|
|
|
|
case kDebuggerdNativeBacktrace:
|
2021-05-19 01:14:15 +02:00
|
|
|
// Don't generate tombstones for native backtrace requests.
|
2021-01-27 00:53:11 +01:00
|
|
|
return {};
|
|
|
|
|
|
|
|
case kDebuggerdTombstoneProto:
|
|
|
|
if (!supports_proto_) {
|
|
|
|
LOG(ERROR) << "received kDebuggerdTombstoneProto on a queue that doesn't support proto";
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
result.proto = create_temporary_file();
|
|
|
|
result.text = create_temporary_file();
|
|
|
|
break;
|
|
|
|
|
2021-05-19 01:14:15 +02:00
|
|
|
case kDebuggerdJavaBacktrace:
|
2021-01-27 00:53:11 +01:00
|
|
|
case kDebuggerdTombstone:
|
|
|
|
result.text = create_temporary_file();
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
LOG(ERROR) << "unexpected dump type: " << dump_type;
|
|
|
|
return {};
|
2017-05-15 16:59:30 +02:00
|
|
|
}
|
2021-01-27 00:53:11 +01:00
|
|
|
|
|
|
|
return result;
|
2018-04-19 03:11:01 +02:00
|
|
|
}
|
2016-10-19 03:17:52 +02:00
|
|
|
|
2021-01-27 00:53:11 +01:00
|
|
|
borrowed_fd dir_fd() { return dir_fd_; }
|
|
|
|
|
|
|
|
CrashArtifactPaths get_next_artifact_paths() {
|
|
|
|
CrashArtifactPaths result;
|
|
|
|
result.text = StringPrintf("%s%02d", file_name_prefix_.c_str(), next_artifact_);
|
|
|
|
|
|
|
|
if (supports_proto_) {
|
|
|
|
result.proto = StringPrintf("%s%02d.pb", file_name_prefix_.c_str(), next_artifact_);
|
|
|
|
}
|
|
|
|
|
2017-05-15 16:59:30 +02:00
|
|
|
next_artifact_ = (next_artifact_ + 1) % max_artifacts_;
|
2021-01-27 00:53:11 +01:00
|
|
|
return result;
|
2017-05-15 16:59:30 +02:00
|
|
|
}
|
2016-10-19 03:17:52 +02:00
|
|
|
|
2021-01-27 00:53:11 +01:00
|
|
|
// Consumes crash if it returns true, otherwise leaves it untouched.
|
|
|
|
bool maybe_enqueue_crash(std::unique_ptr<Crash>&& crash) {
|
2017-05-15 16:59:30 +02:00
|
|
|
if (num_concurrent_dumps_ == max_concurrent_dumps_) {
|
2021-01-27 00:53:11 +01:00
|
|
|
queued_requests_.emplace_back(std::move(crash));
|
2017-05-15 16:59:30 +02:00
|
|
|
return true;
|
|
|
|
}
|
2016-10-19 03:17:52 +02:00
|
|
|
|
2017-05-15 16:59:30 +02:00
|
|
|
return false;
|
|
|
|
}
|
2016-10-19 03:17:52 +02:00
|
|
|
|
2021-01-27 00:53:11 +01:00
|
|
|
void maybe_dequeue_crashes(void (*handler)(std::unique_ptr<Crash> crash)) {
|
2017-05-15 16:59:30 +02:00
|
|
|
while (!queued_requests_.empty() && num_concurrent_dumps_ < max_concurrent_dumps_) {
|
2021-01-27 00:53:11 +01:00
|
|
|
std::unique_ptr<Crash> next_crash = std::move(queued_requests_.front());
|
2017-05-15 16:59:30 +02:00
|
|
|
queued_requests_.pop_front();
|
2021-01-27 00:53:11 +01:00
|
|
|
handler(std::move(next_crash));
|
2017-05-15 16:59:30 +02:00
|
|
|
}
|
|
|
|
}
|
2016-10-19 03:17:52 +02:00
|
|
|
|
2017-05-15 16:59:30 +02:00
|
|
|
void on_crash_started() { ++num_concurrent_dumps_; }
|
|
|
|
|
|
|
|
void on_crash_completed() { --num_concurrent_dumps_; }
|
|
|
|
|
|
|
|
private:
|
|
|
|
void find_oldest_artifact() {
|
|
|
|
size_t oldest_tombstone = 0;
|
|
|
|
time_t oldest_time = std::numeric_limits<time_t>::max();
|
|
|
|
|
|
|
|
for (size_t i = 0; i < max_artifacts_; ++i) {
|
2021-01-27 00:53:11 +01:00
|
|
|
std::string path =
|
|
|
|
StringPrintf("%s/%s%02zu", dir_path_.c_str(), file_name_prefix_.c_str(), i);
|
2017-05-15 16:59:30 +02:00
|
|
|
struct stat st;
|
|
|
|
if (stat(path.c_str(), &st) != 0) {
|
|
|
|
if (errno == ENOENT) {
|
|
|
|
oldest_tombstone = i;
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
PLOG(ERROR) << "failed to stat " << path;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (st.st_mtime < oldest_time) {
|
2017-01-24 00:56:35 +01:00
|
|
|
oldest_tombstone = i;
|
2017-05-15 16:59:30 +02:00
|
|
|
oldest_time = st.st_mtime;
|
2017-01-24 00:56:35 +01:00
|
|
|
}
|
2016-10-19 03:17:52 +02:00
|
|
|
}
|
|
|
|
|
2017-05-15 16:59:30 +02:00
|
|
|
next_artifact_ = oldest_tombstone;
|
2016-10-19 03:17:52 +02:00
|
|
|
}
|
|
|
|
|
2017-05-15 16:59:30 +02:00
|
|
|
const std::string file_name_prefix_;
|
2016-10-19 03:17:52 +02:00
|
|
|
|
2017-05-15 16:59:30 +02:00
|
|
|
const std::string dir_path_;
|
|
|
|
const int dir_fd_;
|
2016-10-19 03:17:52 +02:00
|
|
|
|
2017-05-15 16:59:30 +02:00
|
|
|
const size_t max_artifacts_;
|
|
|
|
int next_artifact_;
|
2016-10-19 03:17:52 +02:00
|
|
|
|
2017-05-15 16:59:30 +02:00
|
|
|
const size_t max_concurrent_dumps_;
|
|
|
|
size_t num_concurrent_dumps_;
|
|
|
|
|
2021-01-27 00:53:11 +01:00
|
|
|
bool supports_proto_;
|
2024-03-19 01:15:57 +01:00
|
|
|
bool world_readable_;
|
2021-01-27 00:53:11 +01:00
|
|
|
|
|
|
|
std::deque<std::unique_ptr<Crash>> queued_requests_;
|
2017-05-15 16:59:30 +02:00
|
|
|
|
2017-05-24 16:07:25 +02:00
|
|
|
DISALLOW_COPY_AND_ASSIGN(CrashQueue);
|
2017-05-15 16:59:30 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
// Whether java trace dumps are produced via tombstoned.
// When true, main() also listens on the java trace control socket and routes
// its connections through the ANR queue.
static constexpr bool kJavaTraceDumpsEnabled = true;
|
2017-05-15 16:59:30 +02:00
|
|
|
|
|
|
|
// Forward declare the callbacks so they can be placed in a sensible order.
// Lifecycle of a connection: crash_accept_cb -> crash_request_cb ->
// (perform_request) -> crash_completed_cb.
static void crash_accept_cb(evconnlistener* listener, evutil_socket_t sockfd, sockaddr*, int,
                            void*);
static void crash_request_cb(evutil_socket_t sockfd, short ev, void* arg);
static void crash_completed_cb(evutil_socket_t sockfd, short ev, void* arg);
|
2016-10-19 03:17:52 +02:00
|
|
|
|
2021-01-27 00:53:11 +01:00
|
|
|
// Starts an actual dump: picks the output fd(s) — either an interceptor's fd
// or fresh temporary files from the appropriate CrashQueue — sends them to the
// requester, and re-arms the crash's event to wait for dump completion.
// Ownership: on success the Crash is released to the event loop; on any early
// return the unique_ptr destroys it (which frees its event).
static void perform_request(std::unique_ptr<Crash> crash) {
  unique_fd output_fd;
  // An interceptor registered for this pid/type supplies the output fd.
  if (intercept_manager->FindIntercept(crash->crash_pid, crash->crash_type, &output_fd)) {
    if (crash->crash_type == kDebuggerdTombstoneProto) {
      // The intercept only captures text output; sink the proto stream.
      crash->output.proto = CrashArtifact::devnull();
    }
  } else {
    if (auto o = CrashQueue::for_crash(crash.get())->get_output(crash->crash_type); o) {
      crash->output = std::move(*o);
      // Duplicate the text fd: one copy goes to the requester below, the
      // copy kept in crash->output is linked into place on completion.
      output_fd.reset(dup(crash->output.text.fd));
    } else {
      LOG(ERROR) << "failed to get crash output for type " << crash->crash_type;
      return;
    }
  }

  TombstonedCrashPacket response = {.packet_type = CrashPacketType::kPerformDump};

  // Hand the output fd(s) to the requester over the crash socket.
  ssize_t rc = -1;
  if (crash->output.proto) {
    rc = SendFileDescriptors(crash->crash_socket_fd, &response, sizeof(response), output_fd.get(),
                             crash->output.proto->fd.get());
  } else {
    rc = SendFileDescriptors(crash->crash_socket_fd, &response, sizeof(response), output_fd.get());
  }

  output_fd.reset();

  if (rc == -1) {
    PLOG(WARNING) << "failed to send response to CrashRequest";
    return;
  } else if (rc != sizeof(response)) {
    PLOG(WARNING) << "crash socket write returned short";
    return;
  }

  // TODO: Make this configurable by the interceptor?
  struct timeval timeout = {10 * android::base::HwTimeoutMultiplier(), 0};

  event_base* base = event_get_base(crash->crash_event);

  // Reuse the crash's event to wait for the kCompletedDump packet (or a
  // timeout) on the same socket.
  event_assign(crash->crash_event, base, crash->crash_socket_fd, EV_TIMEOUT | EV_READ,
               crash_completed_cb, crash.get());
  event_add(crash->crash_event, &timeout);
  CrashQueue::for_crash(crash)->on_crash_started();

  // The crash is now owned by the event loop.
  crash.release();
}
|
|
|
|
|
|
|
|
static void crash_accept_cb(evconnlistener* listener, evutil_socket_t sockfd, sockaddr*, int,
|
2017-05-24 16:07:25 +02:00
|
|
|
void*) {
|
2016-10-19 03:17:52 +02:00
|
|
|
event_base* base = evconnlistener_get_base(listener);
|
|
|
|
Crash* crash = new Crash();
|
|
|
|
|
2017-05-24 16:07:25 +02:00
|
|
|
// TODO: Make sure that only java crashes come in on the java socket
|
|
|
|
// and only native crashes on the native socket.
|
2021-03-11 21:51:25 +01:00
|
|
|
struct timeval timeout = {1 * android::base::HwTimeoutMultiplier(), 0};
|
2016-10-19 03:17:52 +02:00
|
|
|
event* crash_event = event_new(base, sockfd, EV_TIMEOUT | EV_READ, crash_request_cb, crash);
|
2018-04-19 03:11:01 +02:00
|
|
|
crash->crash_socket_fd.reset(sockfd);
|
2016-10-19 03:17:52 +02:00
|
|
|
crash->crash_event = crash_event;
|
|
|
|
event_add(crash_event, &timeout);
|
|
|
|
}
|
|
|
|
|
|
|
|
// First stage of a crash: reads and validates the initial kDumpRequest packet,
// determines the crashing pid, then either enqueues the crash (queue full) or
// starts the dump immediately.
// Ownership: arg is the Crash allocated in crash_accept_cb; the unique_ptr
// destroys it on every early-return path.
static void crash_request_cb(evutil_socket_t sockfd, short ev, void* arg) {
  std::unique_ptr<Crash> crash(static_cast<Crash*>(arg));
  TombstonedCrashPacket request = {};

  if ((ev & EV_TIMEOUT) != 0) {
    LOG(WARNING) << "crash request timed out";
    return;
  } else if ((ev & EV_READ) == 0) {
    LOG(WARNING) << "tombstoned received unexpected event from crash socket";
    return;
  }

  ssize_t rc = TEMP_FAILURE_RETRY(read(sockfd, &request, sizeof(request)));
  if (rc == -1) {
    PLOG(WARNING) << "failed to read from crash socket";
    return;
  } else if (rc != sizeof(request)) {
    LOG(WARNING) << "crash socket received short read of length " << rc << " (expected "
                 << sizeof(request) << ")";
    return;
  }

  if (request.packet_type != CrashPacketType::kDumpRequest) {
    LOG(WARNING) << "unexpected crash packet type, expected kDumpRequest, received "
                 << StringPrintf("%#2hhX", request.packet_type);
    return;
  }

  // Range-check the dump type before using it to pick a queue.
  crash->crash_type = request.packet.dump_request.dump_type;
  if (crash->crash_type < 0 || crash->crash_type > kDebuggerdTombstoneProto) {
    LOG(WARNING) << "unexpected crash dump type: " << crash->crash_type;
    return;
  }

  if (crash->crash_type != kDebuggerdJavaBacktrace) {
    crash->crash_pid = request.packet.dump_request.pid;
  } else {
    // Requests for java traces are sent from untrusted processes, so we
    // must not trust the PID sent down with the request. Instead, we ask the
    // kernel.
    ucred cr = {};
    socklen_t len = sizeof(cr);
    int ret = getsockopt(sockfd, SOL_SOCKET, SO_PEERCRED, &cr, &len);
    if (ret != 0) {
      PLOG(ERROR) << "Failed to getsockopt(..SO_PEERCRED)";
      return;
    }

    crash->crash_pid = cr.pid;
  }

  // Copy the pid out before the Crash may be moved away below.
  pid_t crash_pid = crash->crash_pid;
  LOG(INFO) << "received crash request for pid " << crash_pid;

  // maybe_enqueue_crash only consumes the crash when it returns true, so the
  // unique_ptr is still valid in the else branch.
  if (CrashQueue::for_crash(crash)->maybe_enqueue_crash(std::move(crash))) {
    LOG(INFO) << "enqueueing crash request for pid " << crash_pid;
  } else {
    perform_request(std::move(crash));
  }
}
|
2021-01-26 13:36:12 +01:00
|
|
|
|
2021-02-06 01:38:35 +01:00
|
|
|
// Link the anonymous tombstone fd (created with O_TMPFILE inside dirfd) into
// dirfd under `path`, replacing any previous file of that name. Returns true
// on success.
static bool rename_tombstone_fd(borrowed_fd fd, borrowed_fd dirfd, const std::string& path) {
  // Always try to unlink the tombstone file.
  // linkat doesn't let us replace a file, so we need to unlink before linking
  // our results onto disk, and if we fail for some reason, we should delete
  // stale tombstones to avoid confusing inconsistency.
  if (unlinkat(dirfd.get(), path.c_str(), 0) != 0 && errno != ENOENT) {
    PLOG(ERROR) << "failed to unlink tombstone at " << path;
    return false;
  }

  // This fd is created inside of dirfd in CrashQueue::create_temporary_file.
  const std::string proc_path = StringPrintf("/proc/self/fd/%d", fd.get());
  if (linkat(AT_FDCWD, proc_path.c_str(), dirfd.get(), path.c_str(), AT_SYMLINK_FOLLOW) != 0) {
    PLOG(ERROR) << "failed to link tombstone at " << path;
    return false;
  }

  return true;
}
|
|
|
|
|
2021-01-27 00:53:11 +01:00
|
|
|
// Second stage of a crash: reads and validates the kCompletedDump packet, then
// links the temporary output file(s) into their final rotating slot and logs
// where the artifact landed.
static void crash_completed(borrowed_fd sockfd, std::unique_ptr<Crash> crash) {
  TombstonedCrashPacket request = {};
  CrashQueue* queue = CrashQueue::for_crash(crash);

  ssize_t rc = TEMP_FAILURE_RETRY(read(sockfd.get(), &request, sizeof(request)));
  if (rc == -1) {
    PLOG(WARNING) << "failed to read from crash socket";
    return;
  } else if (rc != sizeof(request)) {
    LOG(WARNING) << "crash socket received short read of length " << rc << " (expected "
                 << sizeof(request) << ")";
    return;
  }

  if (request.packet_type != CrashPacketType::kCompletedDump) {
    LOG(WARNING) << "unexpected crash packet type, expected kCompletedDump, received "
                 << uint32_t(request.packet_type);
    return;
  }

  // Intercepted dumps were written to the interceptor's fd, not ours; there is
  // nothing to link into the artifact directory.
  if (crash->output.text.fd == -1) {
    LOG(WARNING) << "skipping tombstone file creation due to intercept";
    return;
  }

  CrashArtifactPaths paths = queue->get_next_artifact_paths();

  if (crash->output.proto && crash->output.proto->fd != -1) {
    if (!paths.proto) {
      LOG(ERROR) << "missing path for proto tombstone";
    } else {
      rename_tombstone_fd(crash->output.proto->fd, queue->dir_fd(), *paths.proto);
    }
  }

  if (rename_tombstone_fd(crash->output.text.fd, queue->dir_fd(), paths.text)) {
    if (crash->crash_type == kDebuggerdJavaBacktrace) {
      LOG(ERROR) << "Traces for pid " << crash->crash_pid << " written to: " << paths.text;
    } else {
      // NOTE: Several tools parse this log message to figure out where the
      // tombstone associated with a given native crash was written. Any changes
      // to this message must be carefully considered.
      LOG(ERROR) << "Tombstone written to: " << paths.text;
    }
  }
}
|
2017-06-02 22:02:10 +02:00
|
|
|
|
2021-01-27 00:53:11 +01:00
|
|
|
// Invoked when the dumper either finished writing (EV_READ) or timed out.
// Takes ownership of the Crash back from the event loop, accounts for the
// finished dump, and then starts any queued requests.
static void crash_completed_cb(evutil_socket_t sockfd, short ev, void* arg) {
  std::unique_ptr<Crash> finished(static_cast<Crash*>(arg));
  CrashQueue* const queue = CrashQueue::for_crash(finished);

  queue->on_crash_completed();

  // Only try to read the completion packet when the socket became readable;
  // on a timeout the crash is simply dropped.
  if ((ev & EV_READ) == EV_READ) {
    crash_completed(sockfd, std::move(finished));
  }

  // If there's something queued up, let them proceed.
  queue->maybe_dequeue_crashes(perform_request);
}
|
|
|
|
|
|
|
|
int main(int, char* []) {
  // Restrict default permissions on files we create; world-readable queues
  // fchmodat afterwards (see CrashQueue::create_temporary_file).
  umask(0117);

  // Don't try to connect to ourselves if we crash.
  struct sigaction action = {};
  action.sa_handler = [](int signal) {
    LOG(ERROR) << "received fatal signal " << signal;
    _exit(1);
  };
  debuggerd_register_handlers(&action);

  // Control sockets are created by init and handed to us by name.
  int intercept_socket = android_get_control_socket(kTombstonedInterceptSocketName);
  int crash_socket = android_get_control_socket(kTombstonedCrashSocketName);

  if (intercept_socket == -1 || crash_socket == -1) {
    PLOG(FATAL) << "failed to get socket from init";
  }

  evutil_make_socket_nonblocking(intercept_socket);
  evutil_make_socket_nonblocking(crash_socket);

  event_base* base = event_base_new();
  if (!base) {
    LOG(FATAL) << "failed to create event_base";
  }

  // Global interceptor registry; intentionally leaked (lives as long as the
  // daemon).
  intercept_manager = new InterceptManager(base, intercept_socket);

  // Listener for native crash dump requests, routed to the tombstone queue.
  evconnlistener* tombstone_listener =
      evconnlistener_new(base, crash_accept_cb, CrashQueue::for_tombstones(), LEV_OPT_CLOSE_ON_FREE,
                         -1 /* backlog */, crash_socket);
  if (!tombstone_listener) {
    LOG(FATAL) << "failed to create evconnlistener for tombstones.";
  }

  if (kJavaTraceDumpsEnabled) {
    // Listener for java trace requests, routed to the ANR queue.
    const int java_trace_socket = android_get_control_socket(kTombstonedJavaTraceSocketName);
    if (java_trace_socket == -1) {
      PLOG(FATAL) << "failed to get socket from init";
    }

    evutil_make_socket_nonblocking(java_trace_socket);
    evconnlistener* java_trace_listener =
        evconnlistener_new(base, crash_accept_cb, CrashQueue::for_anrs(), LEV_OPT_CLOSE_ON_FREE,
                           -1 /* backlog */, java_trace_socket);
    if (!java_trace_listener) {
      LOG(FATAL) << "failed to create evconnlistener for java traces.";
    }
  }

  LOG(INFO) << "tombstoned successfully initialized";
  // Run the event loop forever; the daemon only exits via a fatal signal or
  // a FATAL log above.
  event_base_dispatch(base);
}
|