/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "init.h"

#include <dirent.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/mount.h>
#include <sys/signalfd.h>
#include <sys/types.h>
#include <unistd.h>

#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
#include <sys/_system_properties.h>

#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <optional>
#include <queue>  // std::queue is used for pending_control_messages below
#include <thread>
#include <vector>

#include <android-base/chrono_utils.h>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/parseint.h>
#include <android-base/properties.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <backtrace/Backtrace.h>
#include <fs_avb/fs_avb.h>
#include <fs_mgr_vendor_overlay.h>
#include <keyutils.h>
#include <libavb/libavb.h>
#include <libgsi/libgsi.h>
#include <processgroup/processgroup.h>
#include <processgroup/setup.h>
#include <selinux/android.h>

#include "action_parser.h"
#include "builtins.h"
#include "epoll.h"
#include "first_stage_init.h"
#include "first_stage_mount.h"
#include "import_parser.h"
#include "keychords.h"
#include "lmkd_service.h"
#include "mount_handler.h"
#include "mount_namespace.h"
#include "property_service.h"
#include "proto_utils.h"
#include "reboot.h"
#include "reboot_utils.h"
#include "security.h"
#include "selabel.h"
#include "selinux.h"
#include "service.h"
#include "service_parser.h"
#include "sigchld_handler.h"
#include "system/core/init/property_service.pb.h"
#include "util.h"

using namespace std::chrono_literals;
using namespace std::string_literals;

using android::base::boot_clock;
using android::base::ConsumePrefix;
using android::base::GetProperty;
using android::base::ReadFileToString;
using android::base::SetProperty;
using android::base::StringPrintf;
using android::base::Timer;
using android::base::Trim;
using android::fs_mgr::AvbHandle;

namespace android {
namespace init {

static int property_triggers_enabled = 0;

static int signal_fd = -1;
static int property_fd = -1;

static std::unique_ptr<Subcontext> subcontext;
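
// Control messages (e.g. "ctl.start") arrive on the property service thread, but must be acted on
// by init's main thread, so the property thread queues them here and wakes the main loop via
// wake_main_thread_fd.  Handling them asynchronously avoids the deadlock that occurred when every
// property message was forwarded to init over a blocking socket that could fill up.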
struct PendingControlMessage {
    std::string message;
    std::string name;
    pid_t pid;
    int fd;
};
static std::mutex pending_control_messages_lock;
static std::queue<PendingControlMessage> pending_control_messages;

// Init epolls various FDs to wait for various inputs.  It previously waited on property changes
// with a blocking socket that contained the information related to the change; however, it was
// easy to fill that socket and deadlock the system.  Now we use locks to handle the property
// changes directly in the property thread, but we still must wake the epoll to inform init that
// there is a change to process, so we use this FD.  It is non-blocking, since we do not care how
// many times WakeMainInitThread() is called, only that the epoll will wake.
static int wake_main_thread_fd = -1;

static void InstallInitNotifier(Epoll* epoll) {
    wake_main_thread_fd = eventfd(0, EFD_CLOEXEC);
    if (wake_main_thread_fd == -1) {
        PLOG(FATAL) << "Failed to create eventfd for waking init";
    }
    auto clear_eventfd = [] {
        uint64_t counter;
        TEMP_FAILURE_RETRY(read(wake_main_thread_fd, &counter, sizeof(counter)));
    };

    if (auto result = epoll->RegisterHandler(wake_main_thread_fd, clear_eventfd); !result.ok()) {
        LOG(FATAL) << result.error();
    }
}
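
// Writing to the eventfd increments its counter and readies the fd for the epoll; the
// clear_eventfd handler registered above then reads the counter back down to zero.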
static void WakeMainInitThread() {
    uint64_t counter = 1;
    TEMP_FAILURE_RETRY(write(wake_main_thread_fd, &counter, sizeof(counter)));
}
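
// Tracks the single outstanding wait_for_property request.  The main loop avoids dispatching
// further commands while a wait is pending (see MightBeWaiting()), and the property thread calls
// CheckAndResetWait() on every property change to see whether the wait has been satisfied.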
static class PropWaiterState {
  public:
    bool StartWaiting(const char* name, const char* value) {
        auto lock = std::lock_guard{lock_};
        if (waiting_for_prop_) {
            return false;
        }
        if (GetProperty(name, "") != value) {
            // Current property value is not equal to expected value
            wait_prop_name_ = name;
            wait_prop_value_ = value;
            waiting_for_prop_.reset(new Timer());
        } else {
            LOG(INFO) << "start_waiting_for_property(\"" << name << "\", \"" << value
                      << "\"): already set";
        }
        return true;
    }

    void ResetWaitForProp() {
        auto lock = std::lock_guard{lock_};
        ResetWaitForPropLocked();
    }

    void CheckAndResetWait(const std::string& name, const std::string& value) {
        auto lock = std::lock_guard{lock_};
        // We always record how long init waited for ueventd to tell us cold boot finished.
        // If we aren't waiting on this property, it means that ueventd finished before we even
        // started to wait.
        if (name == kColdBootDoneProp) {
            auto time_waited = waiting_for_prop_ ? waiting_for_prop_->duration().count() : 0;
            std::thread([time_waited] {
                SetProperty("ro.boottime.init.cold_boot_wait", std::to_string(time_waited));
            }).detach();
        }

        if (waiting_for_prop_) {
            if (wait_prop_name_ == name && wait_prop_value_ == value) {
                LOG(INFO) << "Wait for property '" << wait_prop_name_ << "=" << wait_prop_value_
                          << "' took " << *waiting_for_prop_;
                ResetWaitForPropLocked();
                WakeMainInitThread();
            }
        }
    }

    // This is not thread safe because it releases the lock when it returns, so the waiting state
    // may change.  However, we only use this function to prevent running commands in the main
    // thread loop when we are waiting, so we do not care about false positives; only false
    // negatives.  StartWaiting() and this function are always called from the same thread, so
    // false negatives are not possible and therefore we're okay.
    bool MightBeWaiting() {
        auto lock = std::lock_guard{lock_};
        return static_cast<bool>(waiting_for_prop_);
    }

  private:
    void ResetWaitForPropLocked() {
        wait_prop_name_.clear();
        wait_prop_value_.clear();
        waiting_for_prop_.reset();
    }

    std::mutex lock_;
    std::unique_ptr<Timer> waiting_for_prop_{nullptr};
    std::string wait_prop_name_;
    std::string wait_prop_value_;
} prop_waiter_state;

bool start_waiting_for_property(const char* name, const char* value) {
    return prop_waiter_state.StartWaiting(name, value);
}

void ResetWaitForProp() {
    prop_waiter_state.ResetWaitForProp();
}

static void UnwindMainThreadStack() {
    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, 1));
    if (!backtrace->Unwind(0)) {
        LOG(ERROR) << __FUNCTION__ << ": Failed to unwind callstack.";
    }
    for (size_t i = 0; i < backtrace->NumFrames(); i++) {
        LOG(ERROR) << backtrace->FormatFrameData(i);
    }
}

static class ShutdownState {
  public:
    void TriggerShutdown(const std::string& command) {
        // We can't call HandlePowerctlMessage() directly in this function, because it modifies
        // the contents of the action queue, which can cause the action queue to get into a bad
        // state if this function is called from a command being executed by the action queue.
        // Instead we set this flag and ensure that shutdown happens before the next command is
        // run in the main init loop.
        auto lock = std::lock_guard{shutdown_command_lock_};
        shutdown_command_ = command;
        do_shutdown_ = true;
        WakeMainInitThread();
    }

    std::optional<std::string> CheckShutdown() {
        auto lock = std::lock_guard{shutdown_command_lock_};
        if (do_shutdown_ && !IsShuttingDown()) {
            do_shutdown_ = false;
            return shutdown_command_;
        }
        return {};
    }

    bool do_shutdown() const { return do_shutdown_; }

  private:
    std::mutex shutdown_command_lock_;
    std::string shutdown_command_;
    bool do_shutdown_ = false;
} shutdown_state;
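
// Called when sys.powerctl is set; logs the shutdown state and, if a shutdown request is already
// pending or in progress, dumps the main thread's (tid 1) stack to help debug stuck reboots.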
void DebugRebootLogging() {
    LOG(INFO) << "do_shutdown: " << shutdown_state.do_shutdown()
              << " IsShuttingDown: " << IsShuttingDown();
    if (shutdown_state.do_shutdown()) {
        LOG(ERROR) << "sys.powerctl set while a previous shutdown command has not been handled";
        UnwindMainThreadStack();
    }
    if (IsShuttingDown()) {
        LOG(ERROR) << "sys.powerctl set while init is already shutting down";
        UnwindMainThreadStack();
    }
}

void DumpState() {
    ServiceList::GetInstance().DumpState();
    ActionManager::GetInstance().DumpState();
}
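
// Builds the full init.rc parser ("service", "on", and "import" sections); services and actions
// may run in a subcontext if one has been created.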
Parser CreateParser(ActionManager& action_manager, ServiceList& service_list) {
    Parser parser;

    parser.AddSectionParser("service", std::make_unique<ServiceParser>(
                                               &service_list, subcontext.get(), std::nullopt));
    parser.AddSectionParser("on",
                            std::make_unique<ActionParser>(&action_manager, subcontext.get()));
    parser.AddSectionParser("import", std::make_unique<ImportParser>(&parser));

    return parser;
}

// Parser that only accepts new services.
Parser CreateServiceOnlyParser(ServiceList& service_list, bool from_apex) {
    Parser parser;

    parser.AddSectionParser("service",
                            std::make_unique<ServiceParser>(&service_list, subcontext.get(),
                                                            std::nullopt, from_apex));
    return parser;
}

static void LoadBootScripts(ActionManager& action_manager, ServiceList& service_list) {
    Parser parser = CreateParser(action_manager, service_list);

    std::string bootscript = GetProperty("ro.boot.init_rc", "");
    if (bootscript.empty()) {
        parser.ParseConfig("/system/etc/init/hw/init.rc");
        if (!parser.ParseConfig("/system/etc/init")) {
            late_import_paths.emplace_back("/system/etc/init");
        }
        // late_import is available only in Q and earlier releases.  As we don't have system_ext
        // in those versions, skip late_import for system_ext.
        parser.ParseConfig("/system_ext/etc/init");
        if (!parser.ParseConfig("/product/etc/init")) {
            late_import_paths.emplace_back("/product/etc/init");
        }
        if (!parser.ParseConfig("/odm/etc/init")) {
            late_import_paths.emplace_back("/odm/etc/init");
        }
        if (!parser.ParseConfig("/vendor/etc/init")) {
            late_import_paths.emplace_back("/vendor/etc/init");
        }
    } else {
        parser.ParseConfig(bootscript);
    }
}

void PropertyChanged(const std::string& name, const std::string& value) {
    // If the property is sys.powerctl, we bypass the event queue and immediately handle it.
    // This is to ensure that init will always and immediately shutdown/reboot, regardless of
    // whether there are other pending events to process or whether init is waiting on an exec
    // service or on a property.
    // In the non-thermal-shutdown case, the 'shutdown' trigger will be fired to let
    // device-specific commands be executed.
    if (name == "sys.powerctl") {
        trigger_shutdown(value);
    }

    if (property_triggers_enabled) {
        ActionManager::GetInstance().QueuePropertyChange(name, value);
        WakeMainInitThread();
    }

    prop_waiter_state.CheckAndResetWait(name, value);
}
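
// Applies pending service timeouts and restarts, and returns the earliest future time at which a
// service will next need attention so the caller can bound how long it sleeps.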
static std::optional<boot_clock::time_point> HandleProcessActions() {
    std::optional<boot_clock::time_point> next_process_action_time;
    for (const auto& s : ServiceList::GetInstance()) {
        if ((s->flags() & SVC_RUNNING) && s->timeout_period()) {
            auto timeout_time = s->time_started() + *s->timeout_period();
            if (boot_clock::now() > timeout_time) {
                s->Timeout();
            } else {
                if (!next_process_action_time || timeout_time < *next_process_action_time) {
                    next_process_action_time = timeout_time;
                }
            }
        }

        if (!(s->flags() & SVC_RESTARTING)) continue;

        auto restart_time = s->time_started() + s->restart_period();
        if (boot_clock::now() > restart_time) {
            if (auto result = s->Start(); !result.ok()) {
                LOG(ERROR) << "Could not restart process '" << s->name() << "': " << result.error();
            }
        } else {
            if (!next_process_action_time || restart_time < *next_process_action_time) {
                next_process_action_time = restart_time;
            }
        }
    }
    return next_process_action_time;
}

static Result<void> DoControlStart(Service* service) {
    return service->Start();
}

static Result<void> DoControlStop(Service* service) {
    service->Stop();
    return {};
}

static Result<void> DoControlRestart(Service* service) {
    service->Restart();
    return {};
}

enum class ControlTarget {
    SERVICE,    // function gets called for the named service
    INTERFACE,  // action gets called for every service that holds this interface
};

using ControlMessageFunction = std::function<Result<void>(Service*)>;

static const std::map<std::string, ControlMessageFunction, std::less<>>& GetControlMessageMap() {
    // clang-format off
    static const std::map<std::string, ControlMessageFunction, std::less<>> control_message_functions = {
        {"sigstop_on",  [](auto* service) { service->set_sigstop(true); return Result<void>{}; }},
        {"sigstop_off", [](auto* service) { service->set_sigstop(false); return Result<void>{}; }},
        {"oneshot_on",  [](auto* service) { service->set_oneshot(true); return Result<void>{}; }},
        {"oneshot_off", [](auto* service) { service->set_oneshot(false); return Result<void>{}; }},
        {"start",       DoControlStart},
        {"stop",        DoControlStop},
        {"restart",     DoControlRestart},
    };
    // clang-format on

    return control_message_functions;
}
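
// Executes one queued control message.  As a hypothetical example, a privileged process setting
// "ctl.interface_start" for an interface name lands here with message == "interface_start":
// ConsumePrefix() strips "interface_", the service is looked up with FindInterface() rather than
// FindService(), and the remaining "start" selects DoControlStart from the map above.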
static bool HandleControlMessage(std::string_view message, const std::string& name,
                                 pid_t from_pid) {
    std::string cmdline_path = StringPrintf("proc/%d/cmdline", from_pid);
    std::string process_cmdline;
    if (ReadFileToString(cmdline_path, &process_cmdline)) {
        std::replace(process_cmdline.begin(), process_cmdline.end(), '\0', ' ');
        process_cmdline = Trim(process_cmdline);
    } else {
        process_cmdline = "unknown process";
    }

    Service* service = nullptr;
    auto action = message;
    if (ConsumePrefix(&action, "interface_")) {
        service = ServiceList::GetInstance().FindInterface(name);
    } else {
        service = ServiceList::GetInstance().FindService(name);
    }

    if (service == nullptr) {
        LOG(ERROR) << "Control message: Could not find '" << name << "' for ctl." << message
                   << " from pid: " << from_pid << " (" << process_cmdline << ")";
        return false;
    }

    const auto& map = GetControlMessageMap();
    const auto it = map.find(action);
    if (it == map.end()) {
        LOG(ERROR) << "Unknown control msg '" << message << "'";
        return false;
    }
    const auto& function = it->second;

    if (auto result = function(service); !result.ok()) {
        LOG(ERROR) << "Control message: Could not ctl." << message << " for '" << name
                   << "' from pid: " << from_pid << " (" << process_cmdline
                   << "): " << result.error();
        return false;
    }

    LOG(INFO) << "Control message: Processed ctl." << message << " for '" << name
              << "' from pid: " << from_pid << " (" << process_cmdline << ")";
    return true;
}
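
// Runs on the property service thread.  The cap of 100 pending messages keeps a misbehaving
// client from growing the queue without bound; on a dropped message we return false so the
// caller can report the failure to the requester.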
bool QueueControlMessage(const std::string& message, const std::string& name, pid_t pid, int fd) {
    auto lock = std::lock_guard{pending_control_messages_lock};
    if (pending_control_messages.size() > 100) {
        LOG(ERROR) << "Too many pending control messages, dropped '" << message << "' for '"
                   << name << "' from pid: " << pid;
        return false;
    }
    pending_control_messages.push({message, name, pid, fd});
    WakeMainInitThread();
    return true;
}

static void HandleControlMessages() {
    auto lock = std::unique_lock{pending_control_messages_lock};
    // Init historically would only handle one property message, including control messages, in
    // each iteration of its main loop.  We retain this behavior here to prevent starvation of
    // other actions in the main loop.
    if (!pending_control_messages.empty()) {
        auto control_message = pending_control_messages.front();
        pending_control_messages.pop();
        lock.unlock();

        bool success = HandleControlMessage(control_message.message, control_message.name,
                                            control_message.pid);

        uint32_t response = success ? PROP_SUCCESS : PROP_ERROR_HANDLE_CONTROL_MESSAGE;
        if (control_message.fd != -1) {
            TEMP_FAILURE_RETRY(send(control_message.fd, &response, sizeof(response), 0));
            close(control_message.fd);
        }
        lock.lock();
    }
    // If we still have items to process, make sure we wake back up to do so.
    if (!pending_control_messages.empty()) {
        WakeMainInitThread();
    }
}
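
// Starts a wait on kColdBootDoneProp so that later actions do not run until ueventd reports that
// cold boot is complete (see PropWaiterState above).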
static Result<void> wait_for_coldboot_done_action(const BuiltinArguments& args) {
    if (!prop_waiter_state.StartWaiting(kColdBootDoneProp, "true")) {
        LOG(FATAL) << "Could not wait for '" << kColdBootDoneProp << "'";
    }

    return {};
}

static Result<void> SetupCgroupsAction(const BuiltinArguments&) {
    // Have to create <CGROUPS_RC_DIR> using make_dir function
    // for appropriate sepolicy to be set for it.
    make_dir(android::base::Dirname(CGROUPS_RC_PATH), 0711);
    if (!CgroupSetup()) {
        return ErrnoError() << "Failed to setup cgroups";
    }

    return {};
}
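
// Derives ro.boot.flash.locked from androidboot.verifiedbootstate on the kernel command line:
// "orange" (unlocked bootloader) maps to "0", anything else to "1".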
static void export_oem_lock_status() {
    if (!android::base::GetBoolProperty("ro.oem_unlock_supported", false)) {
        return;
    }
    ImportKernelCmdline([](const std::string& key, const std::string& value) {
        if (key == "androidboot.verifiedbootstate") {
            SetProperty("ro.boot.flash.locked", value == "orange" ? "0" : "1");
        }
    });
}

static Result<void> property_enable_triggers_action(const BuiltinArguments& args) {
    /* Enable property triggers. */
    property_triggers_enabled = 1;
    return {};
}

static Result<void> queue_property_triggers_action(const BuiltinArguments& args) {
    ActionManager::GetInstance().QueueBuiltinAction(property_enable_triggers_action,
                                                    "enable_property_trigger");
    ActionManager::GetInstance().QueueAllPropertyActions();
    return {};
}

// Set the UDC controller for the ConfigFS USB Gadgets.
// Read the UDC controller in use from "/sys/class/udc".
// In case of multiple UDC controllers select the first one.
static void set_usb_controller() {
    std::unique_ptr<DIR, decltype(&closedir)> dir(opendir("/sys/class/udc"), closedir);
    if (!dir) return;

    dirent* dp;
    while ((dp = readdir(dir.get())) != nullptr) {
        if (dp->d_name[0] == '.') continue;

        SetProperty("sys.usb.controller", dp->d_name);
        break;
    }
}
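
// SIGTERM is honored only when sent by the kernel (ssi_pid == 0); in the container case (see
// InstallSignalFdHandler) that is the host asking init to shut down.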
static void HandleSigtermSignal(const signalfd_siginfo& siginfo) {
    if (siginfo.ssi_pid != 0) {
        // Drop any userspace SIGTERM requests.
        LOG(DEBUG) << "Ignoring SIGTERM from pid " << siginfo.ssi_pid;
        return;
    }

    HandlePowerctlMessage("shutdown,container");
}
|
|
|
|
|
2018-04-12 03:46:38 +02:00
|
|
|
static void HandleSignalFd() {
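// Each successful read() from a signalfd returns one signalfd_siginfo describing a
// pending signal; a short read indicates something went wrong.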
|
|
|
|
signalfd_siginfo siginfo;
|
|
|
|
ssize_t bytes_read = TEMP_FAILURE_RETRY(read(signal_fd, &siginfo, sizeof(siginfo)));
|
|
|
|
if (bytes_read != sizeof(siginfo)) {
|
|
|
|
PLOG(ERROR) << "Failed to read siginfo from signal_fd";
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (siginfo.ssi_signo) {
|
|
|
|
case SIGCHLD:
|
|
|
|
ReapAnyOutstandingChildren();
|
|
|
|
break;
|
|
|
|
case SIGTERM:
|
|
|
|
HandleSigtermSignal(siginfo);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
PLOG(ERROR) << "signal_fd: received unexpected signal " << siginfo.ssi_signo;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void UnblockSignals() {
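// Runs in the child after fork() (registered via pthread_atfork below) so that child
// processes do not inherit init's blocked SIGCHLD/SIGTERM.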
|
|
|
|
const struct sigaction act { .sa_handler = SIG_DFL };
|
|
|
|
sigaction(SIGCHLD, &act, nullptr);
|
|
|
|
|
2018-02-02 02:14:30 +01:00
|
|
|
sigset_t mask;
|
|
|
|
sigemptyset(&mask);
|
2018-04-12 03:46:38 +02:00
|
|
|
sigaddset(&mask, SIGCHLD);
|
2018-02-02 02:14:30 +01:00
|
|
|
sigaddset(&mask, SIGTERM);
|
|
|
|
|
|
|
|
if (sigprocmask(SIG_UNBLOCK, &mask, nullptr) == -1) {
|
2018-04-12 03:46:38 +02:00
|
|
|
PLOG(FATAL) << "failed to unblock signals for PID " << getpid();
|
2018-02-02 02:14:30 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-10-25 01:20:18 +02:00
|
|
|
static void InstallSignalFdHandler(Epoll* epoll) {
|
2018-04-12 03:46:38 +02:00
|
|
|
// Applying SA_NOCLDSTOP to a defaulted SIGCHLD handler prevents the signalfd from receiving
|
|
|
|
// SIGCHLD when a child process stops or continues (b/77867680#comment9).
|
|
|
|
const struct sigaction act { .sa_handler = SIG_DFL, .sa_flags = SA_NOCLDSTOP };
|
|
|
|
sigaction(SIGCHLD, &act, nullptr);
|
|
|
|
|
2017-09-06 22:43:57 +02:00
|
|
|
sigset_t mask;
|
|
|
|
sigemptyset(&mask);
|
2018-04-12 03:46:38 +02:00
|
|
|
sigaddset(&mask, SIGCHLD);
|
|
|
|
|
|
|
|
if (!IsRebootCapable()) {
|
|
|
|
// If init does not have the CAP_SYS_BOOT capability, it is running in a container.
|
|
|
|
// In that case, receiving SIGTERM will cause the system to shut down.
|
|
|
|
sigaddset(&mask, SIGTERM);
|
|
|
|
}
|
2017-09-06 22:43:57 +02:00
|
|
|
|
|
|
|
if (sigprocmask(SIG_BLOCK, &mask, nullptr) == -1) {
|
2018-04-12 03:46:38 +02:00
|
|
|
PLOG(FATAL) << "failed to block signals";
|
2017-09-06 22:43:57 +02:00
|
|
|
}
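// The signals must be blocked before signalfd() is called; otherwise they would still be
// delivered according to their default dispositions instead of through the fd.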
|
|
|
|
|
2018-04-12 03:46:38 +02:00
|
|
|
// Register a handler to unblock signals in the child processes.
|
|
|
|
const int result = pthread_atfork(nullptr, nullptr, &UnblockSignals);
|
2018-02-02 02:14:30 +01:00
|
|
|
if (result != 0) {
|
|
|
|
LOG(FATAL) << "Failed to register a fork handler: " << strerror(result);
|
|
|
|
}
|
|
|
|
|
2018-04-12 03:46:38 +02:00
|
|
|
signal_fd = signalfd(-1, &mask, SFD_CLOEXEC);
|
|
|
|
if (signal_fd == -1) {
|
|
|
|
PLOG(FATAL) << "failed to create signalfd";
|
2017-09-06 22:43:57 +02:00
|
|
|
}
|
|
|
|
|
2020-02-05 19:49:33 +01:00
|
|
|
if (auto result = epoll->RegisterHandler(signal_fd, HandleSignalFd); !result.ok()) {
|
2015-10-25 01:20:18 +02:00
|
|
|
LOG(FATAL) << result.error();
|
|
|
|
}
|
2017-09-06 22:43:57 +02:00
|
|
|
}
|
|
|
|
|
2018-05-19 00:25:15 +02:00
|
|
|
void HandleKeychord(const std::vector<int>& keycodes) {
|
2018-05-17 00:10:24 +02:00
|
|
|
// Only handle keychords if adbd is running.
|
|
|
|
std::string adb_enabled = android::base::GetProperty("init.svc.adbd", "");
|
2018-05-19 00:25:15 +02:00
|
|
|
if (adb_enabled != "running") {
|
|
|
|
LOG(WARNING) << "Not starting service for keychord " << android::base::Join(keycodes, ' ')
|
|
|
|
<< " because ADB is disabled";
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
auto found = false;
|
|
|
|
for (const auto& service : ServiceList::GetInstance()) {
|
|
|
|
auto svc = service.get();
|
|
|
|
if (svc->keycodes() == keycodes) {
|
|
|
|
found = true;
|
|
|
|
LOG(INFO) << "Starting service '" << svc->name() << "' from keychord "
|
|
|
|
<< android::base::Join(keycodes, ' ');
|
2020-02-05 19:49:33 +01:00
|
|
|
if (auto result = svc->Start(); !result.ok()) {
|
2018-05-19 00:25:15 +02:00
|
|
|
LOG(ERROR) << "Could not start service '" << svc->name() << "' from keychord "
|
|
|
|
<< android::base::Join(keycodes, ' ') << ": " << result.error();
|
2018-05-17 00:10:24 +02:00
|
|
|
}
|
|
|
|
}
|
2018-05-19 00:25:15 +02:00
|
|
|
}
|
|
|
|
if (!found) {
|
|
|
|
LOG(ERROR) << "Service for keychord " << android::base::Join(keycodes, ' ') << " not found";
|
2018-05-17 00:10:24 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-11 17:57:24 +02:00
|
|
|
static void UmountDebugRamdisk() {
|
|
|
|
if (umount("/debug_ramdisk") != 0) {
|
2019-11-22 08:14:10 +01:00
|
|
|
PLOG(ERROR) << "Failed to umount /debug_ramdisk";
|
2019-04-11 17:57:24 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-22 08:14:10 +01:00
|
|
|
static void MountExtraFilesystems() {
|
|
|
|
#define CHECKCALL(x) \
|
|
|
|
if ((x) != 0) PLOG(FATAL) << #x " failed.";
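// #x stringizes the argument, so the failing call appears verbatim in the log.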
|
|
|
|
|
|
|
|
// /apex is used to mount APEXes
|
|
|
|
CHECKCALL(mount("tmpfs", "/apex", "tmpfs", MS_NOEXEC | MS_NOSUID | MS_NODEV,
|
|
|
|
"mode=0755,uid=0,gid=0"));
|
|
|
|
|
|
|
|
// /linkerconfig is used to keep generated linker configuration
|
|
|
|
CHECKCALL(mount("tmpfs", "/linkerconfig", "tmpfs", MS_NOEXEC | MS_NOSUID | MS_NODEV,
|
|
|
|
"mode=0755,uid=0,gid=0"));
|
|
|
|
#undef CHECKCALL
|
|
|
|
}
|
|
|
|
|
2019-03-27 16:10:41 +01:00
|
|
|
static void RecordStageBoottimes(const boot_clock::time_point& second_stage_start_time) {
|
|
|
|
int64_t first_stage_start_time_ns = -1;
|
2019-05-08 21:44:50 +02:00
|
|
|
if (auto first_stage_start_time_str = getenv(kEnvFirstStageStartedAt);
|
2019-03-27 16:10:41 +01:00
|
|
|
first_stage_start_time_str) {
|
2019-08-20 00:21:25 +02:00
|
|
|
SetProperty("ro.boottime.init", first_stage_start_time_str);
|
2019-03-27 16:10:41 +01:00
|
|
|
android::base::ParseInt(first_stage_start_time_str, &first_stage_start_time_ns);
|
|
|
|
}
|
2019-05-08 21:44:50 +02:00
|
|
|
unsetenv(kEnvFirstStageStartedAt);
|
2019-03-27 16:10:41 +01:00
|
|
|
|
|
|
|
int64_t selinux_start_time_ns = -1;
|
2019-05-08 21:44:50 +02:00
|
|
|
if (auto selinux_start_time_str = getenv(kEnvSelinuxStartedAt); selinux_start_time_str) {
|
2019-03-27 16:10:41 +01:00
|
|
|
android::base::ParseInt(selinux_start_time_str, &selinux_start_time_ns);
|
|
|
|
}
|
2019-05-08 21:44:50 +02:00
|
|
|
unsetenv(kEnvSelinuxStartedAt);
|
2019-03-27 16:10:41 +01:00
|
|
|
|
|
|
|
if (selinux_start_time_ns == -1) return;
|
|
|
|
if (first_stage_start_time_ns == -1) return;
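// Both durations below are boot_clock nanoseconds:
//   ro.boottime.init.first_stage = selinux_start - first_stage_start
//   ro.boottime.init.selinux     = second_stage_start - selinux_start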
|
|
|
|
|
2019-08-20 00:21:25 +02:00
|
|
|
SetProperty("ro.boottime.init.first_stage",
|
|
|
|
std::to_string(selinux_start_time_ns - first_stage_start_time_ns));
|
|
|
|
SetProperty("ro.boottime.init.selinux",
|
|
|
|
std::to_string(second_stage_start_time.time_since_epoch().count() -
|
|
|
|
selinux_start_time_ns));
|
2019-03-27 16:10:41 +01:00
|
|
|
}
|
|
|
|
|
2019-04-23 02:46:37 +02:00
|
|
|
void SendLoadPersistentPropertiesMessage() {
|
|
|
|
auto init_message = InitMessage{};
|
|
|
|
init_message.set_load_persistent_properties(true);
|
2020-02-05 19:49:33 +01:00
|
|
|
if (auto result = SendMessage(property_fd, init_message); !result.ok()) {
|
2019-04-23 02:46:37 +02:00
|
|
|
LOG(ERROR) << "Failed to send load persistent properties message: " << result.error();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-06 23:12:05 +01:00
|
|
|
int SecondStageMain(int argc, char** argv) {
|
2018-08-03 22:36:18 +02:00
|
|
|
if (REBOOT_BOOTLOADER_ON_PANIC) {
|
|
|
|
InstallRebootSignalHandlers();
|
init: cleanup is_first_stage conditionals
A recent change to the is_first_stage conditionals created an unneeded
else { } block as both the code in the else { } block and any code
that runs after it are both in the second stage of init. A first step
to clean this up is to remove this else block.
Secondly, given the above confusion, it makes sense to simplify the two
if (is_first_stage) conditions into one, which only now requires
duplicating one line to initialize logging and the actual "init
first/second stage started!" logs.
Lastly, there are a few commands run at the beginning of both init
stages that do not need to be:
* boot_clock::time_point start_time = boot_clock::now();
This is only used in the first stage so keep it there
* umask(0);
umasks are preserved across execve() so it only needs to be set in the
first stage
* chmod("/proc/cmdline", 0440);
This needs to be moved to after /proc is mounted in the first
stage, but otherwise only needs to be done once
Test: Boot bullhead, check umask, check cmdline permissions, check
boot time property
Change-Id: Idb7df1d4330960ce282d9609f5c62281ee2638b9
2017-03-17 02:08:56 +01:00
|
|
|
}
|
2011-12-19 20:21:32 +01:00
|
|
|
|
2019-03-27 16:10:41 +01:00
|
|
|
boot_clock::time_point start_time = boot_clock::now();
|
|
|
|
|
2020-03-12 22:29:25 +01:00
|
|
|
trigger_shutdown = [](const std::string& command) { shutdown_state.TriggerShutdown(command); };
|
2019-11-13 01:21:20 +01:00
|
|
|
|
2019-05-28 19:19:44 +02:00
|
|
|
SetStdioToDevNull(argv);
|
|
|
|
InitKernelLogging(argv);
|
2017-03-17 02:08:56 +01:00
|
|
|
LOG(INFO) << "init second stage started!";
|
2015-04-26 02:42:52 +02:00
|
|
|
|
2020-03-16 18:17:05 +01:00
|
|
|
// Init should not crash because of a dependence on any other process, therefore we ignore
|
|
|
|
// SIGPIPE and handle EPIPE at the call site directly. Note that setting a signal to SIG_IGN
|
|
|
|
// is inherited across exec, but custom signal handlers are not. Since we do not want to
|
|
|
|
// ignore SIGPIPE for child processes, we set a no-op function for the signal handler instead.
|
|
|
|
{
|
|
|
|
struct sigaction action = {.sa_flags = SA_RESTART};
|
|
|
|
action.sa_handler = [](int) {};
|
|
|
|
sigaction(SIGPIPE, &action, nullptr);
|
|
|
|
}
|
2019-10-23 02:18:42 +02:00
|
|
|
|
2019-04-18 23:56:24 +02:00
|
|
|
// Set init and its forked children's oom_adj.
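// oom_score_adj is inherited across fork(), so writing it once for PID 1 covers every
// service that init subsequently spawns.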
|
2019-10-23 02:18:42 +02:00
|
|
|
if (auto result =
|
|
|
|
WriteFile("/proc/1/oom_score_adj", StringPrintf("%d", DEFAULT_OOM_SCORE_ADJUST));
|
2020-02-05 19:49:33 +01:00
|
|
|
!result.ok()) {
|
2019-10-23 02:18:42 +02:00
|
|
|
LOG(ERROR) << "Unable to write " << DEFAULT_OOM_SCORE_ADJUST
|
|
|
|
<< " to /proc/1/oom_score_adj: " << result.error();
|
2019-04-18 23:56:24 +02:00
|
|
|
}
|
|
|
|
|
2017-05-02 23:44:39 +02:00
|
|
|
// Set up a session keyring that all processes will have access to. It
|
|
|
|
// will hold things like FBE encryption keys. No process should override
|
|
|
|
// its session keyring.
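// Passing 1 (create) to keyctl_get_keyring_ID makes the call create the session
// keyring if one does not already exist.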
|
2017-05-10 02:09:06 +02:00
|
|
|
keyctl_get_keyring_ID(KEY_SPEC_SESSION_KEYRING, 1);
|
2017-05-02 23:44:39 +02:00
|
|
|
|
2017-03-17 02:08:56 +01:00
|
|
|
// Indicate that booting is in progress to background fw loaders, etc.
|
|
|
|
close(open("/dev/.booting", O_WRONLY | O_CREAT | O_CLOEXEC, 0000));
|
2015-02-28 15:39:11 +01:00
|
|
|
|
2019-03-04 10:53:34 +01:00
|
|
|
// See if we need to load debug props to allow adb root, when the device is unlocked.
|
|
|
|
const char* force_debuggable_env = getenv("INIT_FORCE_DEBUGGABLE");
|
2019-08-20 00:21:25 +02:00
|
|
|
bool load_debug_prop = false;
|
2019-03-04 10:53:34 +01:00
|
|
|
if (force_debuggable_env && AvbHandle::IsDeviceUnlocked()) {
|
|
|
|
load_debug_prop = "true"s == force_debuggable_env;
|
|
|
|
}
|
|
|
|
unsetenv("INIT_FORCE_DEBUGGABLE");
|
2017-03-17 02:08:56 +01:00
|
|
|
|
2019-08-20 00:21:25 +02:00
|
|
|
// Umount the debug ramdisk so the property service doesn't read .prop files from there
|
|
|
|
// when it is not meant to.
|
|
|
|
if (!load_debug_prop) {
|
|
|
|
UmountDebugRamdisk();
|
|
|
|
}
|
|
|
|
|
|
|
|
PropertyInit();
|
|
|
|
|
|
|
|
// Umount the debug ramdisk once the property service has read the .prop files it was meant to.
|
|
|
|
if (load_debug_prop) {
|
|
|
|
UmountDebugRamdisk();
|
|
|
|
}
|
|
|
|
|
2019-11-22 08:14:10 +01:00
|
|
|
// Mount extra filesystems required during second-stage init.
|
|
|
|
MountExtraFilesystems();
|
|
|
|
|
2017-03-17 02:08:56 +01:00
|
|
|
// Now set up SELinux for second stage.
|
2017-08-10 21:22:44 +02:00
|
|
|
SelinuxSetupKernelLogging();
|
|
|
|
SelabelInitialize();
|
|
|
|
SelinuxRestoreContext();
|
2012-01-13 14:48:47 +01:00
|
|
|
|
2015-10-25 01:20:18 +02:00
|
|
|
Epoll epoll;
|
2020-02-05 19:49:33 +01:00
|
|
|
if (auto result = epoll.Open(); !result.ok()) {
|
2015-10-25 01:20:18 +02:00
|
|
|
PLOG(FATAL) << result.error();
|
2015-04-25 06:13:44 +02:00
|
|
|
}
|
|
|
|
|
2015-10-25 01:20:18 +02:00
|
|
|
InstallSignalFdHandler(&epoll);
|
2020-03-12 22:29:25 +01:00
|
|
|
InstallInitNotifier(&epoll);
|
2019-04-23 02:46:37 +02:00
|
|
|
StartPropertyService(&property_fd);
|
|
|
|
|
2019-08-20 00:21:25 +02:00
|
|
|
// Make the time that init stages started available for bootstat to log.
|
|
|
|
RecordStageBoottimes(start_time);
|
|
|
|
|
|
|
|
// Set libavb version for Framework-only OTA match in Treble build.
|
|
|
|
if (const char* avb_version = getenv("INIT_AVB_VERSION"); avb_version != nullptr) {
|
|
|
|
SetProperty("ro.boot.avb_version", avb_version);
|
|
|
|
}
|
|
|
|
unsetenv("INIT_AVB_VERSION");
|
|
|
|
|
|
|
|
fs_mgr_vendor_overlay_mount_all();
|
|
|
|
export_oem_lock_status();
|
2019-03-13 18:18:24 +01:00
|
|
|
MountHandler mount_handler(&epoll);
|
2016-06-29 15:30:00 +02:00
|
|
|
set_usb_controller();
|
2011-12-16 23:18:06 +01:00
|
|
|
|
2019-07-23 01:05:36 +02:00
|
|
|
const BuiltinFunctionMap& function_map = GetBuiltinFunctionMap();
|
2015-08-26 20:43:36 +02:00
|
|
|
Action::set_function_map(&function_map);
|
|
|
|
|
Proper mount namespace configuration for bionic
This CL fixes the design problem of the previous mechanism for providing
the bootstrap bionic and the runtime bionic to the same path.
Previously, bootstrap bionic was self-bind-mounted; i.e.
/system/bin/libc.so is bind-mounted to itself. And the runtime bionic
was bind-mounted on top of the bootstrap bionic. This has not only caused
problems like `adb sync` not working(b/122737045), but also is quite
difficult to understand due to the double-and-self mounting.
This is the new design:
Most importantly, these four are all distinct:
1) bootstrap bionic (/system/lib/bootstrap/libc.so)
2) runtime bionic (/apex/com.android.runtime/lib/bionic/libc.so)
3) mount point for 1) and 2) (/bionic/lib/libc.so)
4) symlink for 3) (/system/lib/libc.so -> /bionic/lib/libc.so)
Inside the mount namespace of the pre-apexd processes, 1) is
bind-mounted to 3). Likewise, inside the mount namespace of the
post-apexd processes, 2) is bind-mounted to 3). In other words, there is
no self-mount, and no double-mount.
Another change is that mount points are under /bionic and the legacy
paths become symlinks to the mount points. This is to make sure that
there is no bind mounts under /system, which is breaking some apps.
Finally, the code for creating mount namespaces, mounting bionic, etc. is
refactored into mount_namespace.cpp.
Bug: 120266448
Bug: 123275379
Test: m, device boots, adb sync/push/pull works,
especially with following paths:
/bionic/lib64/libc.so
/bionic/bin/linker64
/system/lib64/bootstrap/libc.so
/system/bin/bootstrap/linker64
Change-Id: Icdfbdcc1efca540ac854d4df79e07ee61fca559f
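// A minimal sketch of the bind-mount scheme described above, using the paths from the
// commit message (the real logic lives in SetupMountNamespaces() in mount_namespace.cpp):
//
//   // Pre-apexd namespace: bootstrap bionic at the common mount point.
//   mount("/system/lib/bootstrap/libc.so", "/bionic/lib/libc.so", nullptr, MS_BIND, nullptr);
//   // Post-apexd namespace: runtime bionic at the same mount point.
//   mount("/apex/com.android.runtime/lib/bionic/libc.so", "/bionic/lib/libc.so", nullptr,
//         MS_BIND, nullptr);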
2019-01-16 15:00:59 +01:00
|
|
|
if (!SetupMountNamespaces()) {
|
|
|
|
PLOG(FATAL) << "SetupMountNamespaces failed";
|
|
|
|
}
|
|
|
|
|
2019-09-18 22:47:19 +02:00
|
|
|
subcontext = InitializeSubcontext();
|
2017-09-13 00:58:47 +02:00
|
|
|
|
2017-04-20 00:31:58 +02:00
|
|
|
ActionManager& am = ActionManager::GetInstance();
|
2017-07-28 01:20:58 +02:00
|
|
|
ServiceList& sm = ServiceList::GetInstance();
|
2017-04-20 00:31:58 +02:00
|
|
|
|
2017-07-27 21:54:48 +02:00
|
|
|
LoadBootScripts(am, sm);
|
2008-10-21 16:00:00 +02:00
|
|
|
|
2017-03-13 20:24:49 +01:00
|
|
|
// Turning this on and letting the INFO logging be discarded adds 0.2s to
|
|
|
|
// Nexus 9 boot time, so it's disabled by default.
|
2017-04-20 00:31:58 +02:00
|
|
|
if (false) DumpState();
|
2015-07-24 02:53:11 +02:00
|
|
|
|
2019-02-14 21:46:13 +01:00
|
|
|
// Make the GSI status available before scripts start running.
|
2020-02-25 09:31:10 +01:00
|
|
|
auto is_running = android::gsi::IsGsiRunning() ? "1" : "0";
|
|
|
|
SetProperty(gsi::kGsiBootedProp, is_running);
|
|
|
|
auto is_installed = android::gsi::IsGsiInstalled() ? "1" : "0";
|
|
|
|
SetProperty(gsi::kGsiInstalledProp, is_installed);
|
2019-02-14 21:46:13 +01:00
|
|
|
|
2018-12-21 20:41:50 +01:00
|
|
|
am.QueueBuiltinAction(SetupCgroupsAction, "SetupCgroups");
|
2019-07-30 13:11:20 +02:00
|
|
|
am.QueueBuiltinAction(SetKptrRestrictAction, "SetKptrRestrict");
|
init: add builtin check for perf_event LSM hooks
Historically, the syscall was controlled by a system-wide
perf_event_paranoid sysctl, which is not flexible enough to allow only
specific processes to use the syscall. However, SELinux support for the
syscall has been upstreamed recently[1] (and is being backported to
Android R release common kernels).
[1] https://github.com/torvalds/linux/commit/da97e18458fb42d7c00fac5fd1c56a3896ec666e
As the presence of these hooks is not guaranteed on all Android R
platforms (since we support upgrades while keeping an older kernel), we
need to test for the feature dynamically. The LSM hooks themselves have
no way of being detected directly, so we instead test for their effects:
we perform several syscalls and look for a specific success/failure
combination, corresponding to the platform's SELinux policy.
If hooks are detected, perf_event_paranoid is set to -1 (unrestricted),
as the SELinux policy is then sufficient to control access.
This is done within init for several reasons:
* CAP_SYS_ADMIN side-steps perf_event_paranoid, so the tests can be done
if non-root users aren't allowed to use the syscall (the default).
* init is already the setter of the paranoid value (see init.rc), which
is also a privileged operation.
* the test itself is simple (couple of syscalls), so having a dedicated
test binary/domain felt excessive.
I decided to go through a new sysprop (set by a builtin test in
second-stage init), and keeping the actuation in init.rc. We can change
it to an immediate write to the paranoid value if a use-case comes up
that requires the decision to be made earlier in the init sequence.
Bug: 137092007
Change-Id: Ib13a31fee896f17a28910d993df57168a83a4b3d
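// A minimal sketch of the probe described above, with illustrative attr values (the real
// test lives in TestPerfEventSelinuxAction; the exact attrs and the expected success/failure
// pattern are assumptions here, since they depend on the platform's sepolicy):
//
//   perf_event_attr attr = {};
//   attr.size = sizeof(attr);
//   attr.type = PERF_TYPE_SOFTWARE;
//   attr.config = PERF_COUNT_SW_TASK_CLOCK;
//   // Monitor self on any CPU; with the LSM hooks present, SELinux policy rather than
//   // perf_event_paranoid decides whether this succeeds for a given domain.
//   int fd = syscall(__NR_perf_event_open, &attr, /*pid=*/0, /*cpu=*/-1, /*group_fd=*/-1, 0);
//   if (fd >= 0) close(fd);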
2020-01-14 23:02:53 +01:00
|
|
|
am.QueueBuiltinAction(TestPerfEventSelinuxAction, "TestPerfEventSelinux");
|
2015-07-24 02:53:11 +02:00
|
|
|
am.QueueEventTrigger("early-init");
|
2008-10-21 16:00:00 +02:00
|
|
|
|
2015-04-25 03:50:30 +02:00
|
|
|
// Queue an action that waits for coldboot done so we know ueventd has set up all of /dev...
|
2015-07-24 02:53:11 +02:00
|
|
|
am.QueueBuiltinAction(wait_for_coldboot_done_action, "wait_for_coldboot_done");
|
2015-04-25 03:50:30 +02:00
|
|
|
// ... so that we can start queuing up actions that require stuff from /dev.
|
2017-08-10 21:22:44 +02:00
|
|
|
am.QueueBuiltinAction(MixHwrngIntoLinuxRngAction, "MixHwrngIntoLinuxRng");
|
|
|
|
am.QueueBuiltinAction(SetMmapRndBitsAction, "SetMmapRndBits");
|
2018-05-19 00:25:15 +02:00
|
|
|
Keychords keychords;
|
2015-10-25 01:20:18 +02:00
|
|
|
am.QueueBuiltinAction(
|
2019-06-10 20:08:01 +02:00
|
|
|
[&epoll, &keychords](const BuiltinArguments& args) -> Result<void> {
|
|
|
|
for (const auto& svc : ServiceList::GetInstance()) {
|
|
|
|
keychords.Register(svc->keycodes());
|
|
|
|
}
|
|
|
|
keychords.Start(&epoll, HandleKeychord);
|
|
|
|
return {};
|
|
|
|
},
|
|
|
|
"KeychordInit");
|
2008-10-21 16:00:00 +02:00
|
|
|
|
2015-03-28 07:20:44 +01:00
|
|
|
// Trigger all the boot actions to get us started.
|
2015-07-24 02:53:11 +02:00
|
|
|
am.QueueEventTrigger("init");
|
2011-08-25 00:28:23 +02:00
|
|
|
|
2015-02-07 05:15:18 +01:00
|
|
|
// Repeat mix_hwrng_into_linux_rng in case /dev/hw_random or /dev/random
|
|
|
|
// wasn't ready immediately after wait_for_coldboot_done
|
2017-08-10 21:22:44 +02:00
|
|
|
am.QueueBuiltinAction(MixHwrngIntoLinuxRngAction, "MixHwrngIntoLinuxRng");
|
2008-10-21 16:00:00 +02:00
|
|
|
|
2015-02-07 05:15:18 +01:00
|
|
|
// Don't mount filesystems or start core system services in charger mode.
|
2017-03-29 01:40:41 +02:00
|
|
|
std::string bootmode = GetProperty("ro.bootmode", "");
|
2015-07-24 19:11:05 +02:00
|
|
|
if (bootmode == "charger") {
|
2015-07-24 02:53:11 +02:00
|
|
|
am.QueueEventTrigger("charger");
|
2011-08-25 00:28:23 +02:00
|
|
|
} else {
|
2015-07-24 02:53:11 +02:00
|
|
|
am.QueueEventTrigger("late-init");
|
2011-08-25 00:28:23 +02:00
|
|
|
}
|
2008-10-21 16:00:00 +02:00
|
|
|
|
2015-02-07 05:15:18 +01:00
|
|
|
// Run all property triggers based on current state of the properties.
|
2015-07-24 02:53:11 +02:00
|
|
|
am.QueueBuiltinAction(queue_property_triggers_action, "queue_property_triggers");
|
2010-04-14 04:52:01 +02:00
|
|
|
|
2015-04-25 03:50:30 +02:00
|
|
|
while (true) {
|
2017-03-24 00:54:38 +01:00
|
|
|
// By default, sleep until something happens.
|
2015-10-25 01:20:18 +02:00
|
|
|
auto epoll_timeout = std::optional<std::chrono::milliseconds>{};
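// An empty optional passed to Epoll::Wait() below means no timeout, i.e. block
// indefinitely until a registered fd becomes ready.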
|
2017-03-24 00:54:38 +01:00
|
|
|
|
2020-03-12 22:29:25 +01:00
|
|
|
auto shutdown_command = shutdown_state.CheckShutdown();
|
|
|
|
if (shutdown_command) {
|
2020-03-25 02:00:23 +01:00
|
|
|
LOG(INFO) << "Got shutdown_command '" << *shutdown_command
|
|
|
|
<< "' Calling HandlePowerctlMessage()";
|
2020-03-12 22:29:25 +01:00
|
|
|
HandlePowerctlMessage(*shutdown_command);
|
init: fix crash when reboot is triggered by a builtin
Builtin commands may set the sys.powerctl property, which causes
reboot to be immediately processed. Unfortunately, part of the reboot
processing involves clearing the action queue, so when this scenario
happens, ActionManager::ExecuteOneCommand() can abort due to its state
being unexpectedly changed.
Longer term, the real fix here is to split init and property service.
In this case, the property sets will be sent to property service and
the reboot will only be processed once property service responds back
to init that the property has been set. Since that will not happen
within the action queue, there will be no risk of failure.
Short term, this change sets a flag in init to shutdown the device
before the next action is run, which defers the shutdown enough to fix
the crash, but continues to prevent any further commands from running.
Bug: 65374456
Test: force bullhead into the repro case and observe that it no longer
repros
Change-Id: I89c73dad8d7912a845d694b095cab061b8dcc05e
2017-09-13 23:39:45 +02:00
|
|
|
}
|
|
|
|
|
2020-03-12 22:29:25 +01:00
|
|
|
if (!(prop_waiter_state.MightBeWaiting() || Service::is_exec_service_running())) {
|
2015-07-24 02:53:11 +02:00
|
|
|
am.ExecuteOneCommand();
|
2015-02-07 05:15:18 +01:00
|
|
|
}
|
2020-01-31 17:33:36 +01:00
|
|
|
if (!IsShuttingDown()) {
|
|
|
|
auto next_process_action_time = HandleProcessActions();
|
|
|
|
|
|
|
|
// If there's a process that needs restarting, wake up in time for that.
|
|
|
|
if (next_process_action_time) {
|
|
|
|
epoll_timeout = std::chrono::ceil<std::chrono::milliseconds>(
|
|
|
|
*next_process_action_time - boot_clock::now());
|
|
|
|
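// If the deadline has already passed, clamp to zero so the epoll wait returns immediately.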
if (*epoll_timeout < 0ms) epoll_timeout = 0ms;
|
2017-03-24 00:54:38 +01:00
|
|
|
}
|
2020-01-31 17:33:36 +01:00
|
|
|
}
|
2016-11-11 02:43:47 +01:00
|
|
|
|
2020-03-12 22:29:25 +01:00
|
|
|
if (!(prop_waiter_state.MightBeWaiting() || Service::is_exec_service_running())) {
|
2017-03-24 00:54:38 +01:00
|
|
|
// If there's more work to do, wake up again immediately.
|
2015-10-25 01:20:18 +02:00
|
|
|
if (am.HasMoreCommands()) epoll_timeout = 0ms;
|
2015-02-04 23:46:36 +01:00
|
|
|
}
|
2010-04-14 04:52:01 +02:00
|
|
|
|
2019-08-30 23:12:56 +02:00
|
|
|
auto pending_functions = epoll.Wait(epoll_timeout);
|
2020-02-05 19:49:33 +01:00
|
|
|
if (!pending_functions.ok()) {
|
2019-08-30 23:12:56 +02:00
|
|
|
LOG(ERROR) << pending_functions.error();
|
|
|
|
} else if (!pending_functions->empty()) {
|
|
|
|
// We always reap children before responding to the other pending functions. This is to
|
|
|
|
// prevent a race where other daemons see that a service has exited and ask init to
|
|
|
|
// start it again via ctl.start before init has reaped it.
|
|
|
|
ReapAnyOutstandingChildren();
|
|
|
|
for (const auto& function : *pending_functions) {
|
|
|
|
(*function)();
|
|
|
|
}
|
2008-10-21 16:00:00 +02:00
|
|
|
}
|
2020-03-27 22:08:20 +01:00
|
|
|
if (!IsShuttingDown()) {
|
|
|
|
HandleControlMessages();
|
|
|
|
}
|
2008-10-21 16:00:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2017-06-22 21:53:17 +02:00
|
|
|
|
|
|
|
} // namespace init
|
|
|
|
} // namespace android
|