/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "init.h"

#include <dirent.h>
#include <fcntl.h>
#include <paths.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/mount.h>
#include <sys/signalfd.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <unistd.h>

#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
#include <sys/_system_properties.h>

#include <filesystem>
#include <fstream>
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <optional>
#include <queue>
#include <set>
#include <thread>
#include <vector>

#include <android-base/chrono_utils.h>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/parseint.h>
#include <android-base/properties.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <android-base/thread_annotations.h>
#include <fs_avb/fs_avb.h>
#include <fs_mgr_vendor_overlay.h>
#include <keyutils.h>
#include <libavb/libavb.h>
#include <libgsi/libgsi.h>
#include <libsnapshot/snapshot.h>
#include <logwrap/logwrap.h>
#include <processgroup/processgroup.h>
#include <processgroup/setup.h>
#include <selinux/android.h>
#include <unwindstack/AndroidUnwinder.h>

#include "action.h"
#include "action_manager.h"
#include "action_parser.h"
#include "apex_init_util.h"
#include "epoll.h"
#include "first_stage_init.h"
#include "first_stage_mount.h"
#include "import_parser.h"
#include "keychords.h"
#include "lmkd_service.h"
#include "mount_handler.h"
#include "mount_namespace.h"
#include "property_service.h"
#include "proto_utils.h"
#include "reboot.h"
#include "reboot_utils.h"
#include "second_stage_resources.h"
#include "security.h"
#include "selabel.h"
#include "selinux.h"
#include "service.h"
#include "service_list.h"
#include "service_parser.h"
#include "sigchld_handler.h"
#include "snapuserd_transition.h"
#include "subcontext.h"
#include "system/core/init/property_service.pb.h"
#include "util.h"

#ifndef RECOVERY
#include "com_android_apex.h"
#endif  // RECOVERY

using namespace std::chrono_literals;
using namespace std::string_literals;

using android::base::boot_clock;
using android::base::ConsumePrefix;
using android::base::GetProperty;
using android::base::ReadFileToString;
using android::base::SetProperty;
using android::base::StringPrintf;
using android::base::Timer;
using android::base::Trim;
using android::fs_mgr::AvbHandle;
using android::snapshot::SnapshotManager;

namespace android {
namespace init {

static int property_triggers_enabled = 0;

static int signal_fd = -1;
static int property_fd = -1;

struct PendingControlMessage {
    std::string message;
    std::string name;
    pid_t pid;
    int fd;
};
static std::mutex pending_control_messages_lock;
static std::queue<PendingControlMessage> pending_control_messages;
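
// Control messages are queued here by the property thread and drained later on init's main
// thread (see QueueControlMessage() and HandleControlMessage() below). Handling them
// asynchronously avoids the deadlock that occurred when a process spawned by init blocked on a
// property write while init itself was blocked on that process: the property thread only takes
// the lock, pushes, and wakes the main loop. A hedged usage sketch, assuming a caller on the
// property thread:
//
//   // e.g. triggered by SetProperty("ctl.start", "console") from some client process
//   QueueControlMessage("start", "console", client_pid, client_fd);
//   // ...the main loop later pops the entry and runs HandleControlMessage("start", ...).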

// Init epolls various FDs to wait for various inputs. It previously waited on property changes
// with a blocking socket that contained the information related to the change, however, it was easy
// to fill that socket and deadlock the system. Now we use locks to handle the property changes
// directly in the property thread, however we still must wake the epoll to inform init that there
// is a change to process, so we use this FD. It is non-blocking, since we do not care how many
// times WakeMainInitThread() is called, only that the epoll will wake.
static int wake_main_thread_fd = -1;

static void InstallInitNotifier(Epoll* epoll) {
    wake_main_thread_fd = eventfd(0, EFD_CLOEXEC);
    if (wake_main_thread_fd == -1) {
        PLOG(FATAL) << "Failed to create eventfd for waking init";
    }
    auto clear_eventfd = [] {
        uint64_t counter;
        TEMP_FAILURE_RETRY(read(wake_main_thread_fd, &counter, sizeof(counter)));
    };

    if (auto result = epoll->RegisterHandler(wake_main_thread_fd, clear_eventfd); !result.ok()) {
        LOG(FATAL) << result.error();
    }
}

static void WakeMainInitThread() {
    uint64_t counter = 1;
    TEMP_FAILURE_RETRY(write(wake_main_thread_fd, &counter, sizeof(counter)));
}
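
// Minimal sketch of the eventfd handshake used above: every WakeMainInitThread() adds 1 to the
// kernel-side counter, which makes wake_main_thread_fd readable and wakes the epoll; the
// registered clear_eventfd handler then reads (and thereby zeroes) the counter. Multiple wakes
// coalesce into a single readable event, which is why callers do not care how many times the
// function is invoked:
//
//   WakeMainInitThread();  // counter: 0 -> 1, epoll wakes
//   WakeMainInitThread();  // counter: 1 -> 2, still just one readable event
//   // clear_eventfd() reads 2 and resets the counter to 0.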

static class PropWaiterState {
  public:
    bool StartWaiting(const char* name, const char* value) {
        auto lock = std::lock_guard{lock_};
        if (waiting_for_prop_) {
            return false;
        }
        if (GetProperty(name, "") != value) {
            // Current property value is not equal to expected value
            wait_prop_name_ = name;
            wait_prop_value_ = value;
            waiting_for_prop_.reset(new Timer());
        } else {
            LOG(INFO) << "start_waiting_for_property(\"" << name << "\", \"" << value
                      << "\"): already set";
        }
        return true;
    }

    void ResetWaitForProp() {
        auto lock = std::lock_guard{lock_};
        ResetWaitForPropLocked();
    }

    void CheckAndResetWait(const std::string& name, const std::string& value) {
        auto lock = std::lock_guard{lock_};
        // We always record how long init waited for ueventd to tell us cold boot finished.
        // If we aren't waiting on this property, it means that ueventd finished before we even
        // started to wait.
        if (name == kColdBootDoneProp) {
            auto time_waited = waiting_for_prop_ ? waiting_for_prop_->duration().count() : 0;
            std::thread([time_waited] {
                SetProperty("ro.boottime.init.cold_boot_wait", std::to_string(time_waited));
            }).detach();
        }

        if (waiting_for_prop_) {
            if (wait_prop_name_ == name && wait_prop_value_ == value) {
                LOG(INFO) << "Wait for property '" << wait_prop_name_ << "=" << wait_prop_value_
                          << "' took " << *waiting_for_prop_;
                ResetWaitForPropLocked();
                WakeMainInitThread();
            }
        }
    }

    // This is not thread safe because it releases the lock when it returns, so the waiting state
    // may change. However, we only use this function to prevent running commands in the main
    // thread loop when we are waiting, so we do not care about false positives; only false
    // negatives. StartWaiting() and this function are always called from the same thread, so false
    // negatives are not possible and therefore we're okay.
    bool MightBeWaiting() {
        auto lock = std::lock_guard{lock_};
        return static_cast<bool>(waiting_for_prop_);
    }

  private:
    void ResetWaitForPropLocked() EXCLUSIVE_LOCKS_REQUIRED(lock_) {
        wait_prop_name_.clear();
        wait_prop_value_.clear();
        waiting_for_prop_.reset();
    }

    std::mutex lock_;
    GUARDED_BY(lock_) std::unique_ptr<Timer> waiting_for_prop_{nullptr};
    GUARDED_BY(lock_) std::string wait_prop_name_;
    GUARDED_BY(lock_) std::string wait_prop_value_;
} prop_waiter_state;
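
// The wrappers below are the external interface to prop_waiter_state. As an illustration
// (hedged; the actual caller lives in the builtins, not in this file), an init.rc command such
// as:
//
//   wait_for_prop vold.decrypt trigger_restart_framework
//
// ends up calling start_waiting_for_property("vold.decrypt", "trigger_restart_framework"),
// which pauses command processing until PropertyChanged() sees that exact value and
// CheckAndResetWait() clears the waiting state.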

bool start_waiting_for_property(const char* name, const char* value) {
    return prop_waiter_state.StartWaiting(name, value);
}

void ResetWaitForProp() {
    prop_waiter_state.ResetWaitForProp();
}

static class ShutdownState {
  public:
    void TriggerShutdown(const std::string& command) {
        // We can't call HandlePowerctlMessage() directly in this function,
        // because it modifies the contents of the action queue, which can cause the action queue
        // to get into a bad state if this function is called from a command being executed by the
        // action queue. Instead we set this flag and ensure that shutdown happens before the next
        // command is run in the main init loop.
        auto lock = std::lock_guard{shutdown_command_lock_};
        shutdown_command_ = command;
        do_shutdown_ = true;
        WakeMainInitThread();
    }

    std::optional<std::string> CheckShutdown() __attribute__((warn_unused_result)) {
        auto lock = std::lock_guard{shutdown_command_lock_};
        if (do_shutdown_ && !IsShuttingDown()) {
            do_shutdown_ = false;
            return shutdown_command_;
        }
        return {};
    }

  private:
    std::mutex shutdown_command_lock_;
    std::string shutdown_command_ GUARDED_BY(shutdown_command_lock_);
    bool do_shutdown_ = false;
} shutdown_state;
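
// A hedged sketch of the shutdown flow (the trigger_shutdown hook is wired up elsewhere in init
// and is assumed here to forward to shutdown_state.TriggerShutdown()):
//
//   SetProperty("sys.powerctl", "reboot");         // from some client process
//   -> PropertyChanged("sys.powerctl", "reboot")   // on the property thread, see below
//   -> trigger_shutdown("reboot")                  // sets do_shutdown_, wakes the main loop
//   -> CheckShutdown() returns "reboot" in the main loop, which then performs the reboot.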

void DumpState() {
    ServiceList::GetInstance().DumpState();
    ActionManager::GetInstance().DumpState();
}

Parser CreateParser(ActionManager& action_manager, ServiceList& service_list) {
    Parser parser;

    parser.AddSectionParser("service", std::make_unique<ServiceParser>(
                                               &service_list, GetSubcontext(), std::nullopt));
    parser.AddSectionParser("on", std::make_unique<ActionParser>(&action_manager, GetSubcontext()));
    parser.AddSectionParser("import", std::make_unique<ImportParser>(&parser));

    return parser;
}
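
// The three section parsers registered above correspond to the three top-level constructs in
// *.rc files. A hedged illustrative snippet:
//
//   import /init.${ro.hardware}.rc
//
//   on property:sys.boot_completed=1
//       write /proc/some/node 1
//
//   service console /system/bin/sh
//       class core
//
// `service` sections become Service objects in service_list, `on` sections become Actions in
// action_manager, and `import` pulls further files through the same Parser.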

#ifndef RECOVERY
template <typename T>
struct LibXmlErrorHandler {
    T handler_;
    template <typename Handler>
    LibXmlErrorHandler(Handler&& handler) : handler_(std::move(handler)) {
        xmlSetGenericErrorFunc(nullptr, &ErrorHandler);
    }
    ~LibXmlErrorHandler() { xmlSetGenericErrorFunc(nullptr, nullptr); }
    static void ErrorHandler(void*, const char* msg, ...) {
        va_list args;
        va_start(args, msg);
        char* formatted = nullptr;
        // Only log and free when vasprintf() succeeds; on failure the pointer is indeterminate.
        if (vasprintf(&formatted, msg, args) >= 0) {
            LOG(ERROR) << formatted;
            free(formatted);
        }
        va_end(args);
    }
};

template <typename Handler>
LibXmlErrorHandler(Handler&&) -> LibXmlErrorHandler<Handler>;
#endif  // RECOVERY

// Returns a Parser that accepts scripts from APEX modules. It supports `service` and `on`.
Parser CreateApexConfigParser(ActionManager& action_manager, ServiceList& service_list) {
    Parser parser;
    auto subcontext = GetSubcontext();
#ifndef RECOVERY
    if (subcontext) {
        const auto apex_info_list_file = "/apex/apex-info-list.xml";
        auto error_handler = LibXmlErrorHandler([&](const auto& error_message) {
            LOG(ERROR) << "Failed to read " << apex_info_list_file << ": " << error_message;
        });
        const auto apex_info_list = com::android::apex::readApexInfoList(apex_info_list_file);
        if (apex_info_list.has_value()) {
            std::vector<std::string> subcontext_apexes;
            for (const auto& info : apex_info_list->getApexInfo()) {
                if (info.hasPreinstalledModulePath() &&
                    subcontext->PathMatchesSubcontext(info.getPreinstalledModulePath())) {
                    subcontext_apexes.push_back(info.getModuleName());
                }
            }
            subcontext->SetApexList(std::move(subcontext_apexes));
        }
    }
#endif  // RECOVERY
    parser.AddSectionParser("service",
                            std::make_unique<ServiceParser>(&service_list, subcontext,
                                                            std::nullopt));
    parser.AddSectionParser("on", std::make_unique<ActionParser>(&action_manager, subcontext));

    return parser;
}

static void LoadBootScripts(ActionManager& action_manager, ServiceList& service_list) {
    Parser parser = CreateParser(action_manager, service_list);

    std::string bootscript = GetProperty("ro.boot.init_rc", "");
    if (bootscript.empty()) {
        parser.ParseConfig("/system/etc/init/hw/init.rc");
        if (!parser.ParseConfig("/system/etc/init")) {
            late_import_paths.emplace_back("/system/etc/init");
        }
        // late_import is available only in Q and earlier releases. As we don't
        // have system_ext in those versions, skip late_import for system_ext.
        parser.ParseConfig("/system_ext/etc/init");
        if (!parser.ParseConfig("/vendor/etc/init")) {
            late_import_paths.emplace_back("/vendor/etc/init");
        }
        if (!parser.ParseConfig("/odm/etc/init")) {
            late_import_paths.emplace_back("/odm/etc/init");
        }
        if (!parser.ParseConfig("/product/etc/init")) {
            late_import_paths.emplace_back("/product/etc/init");
        }
    } else {
        parser.ParseConfig(bootscript);
    }
}
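
// The ro.boot.init_rc property is derived from the kernel command line / bootconfig
// (androidboot.init_rc) and, when set, replaces the partition search above entirely. A hedged
// example:
//
//   androidboot.init_rc=/system/etc/init/hw/init.special.rc
//
// would make init parse only that one file.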

void PropertyChanged(const std::string& name, const std::string& value) {
    // If the property is sys.powerctl, we bypass the event queue and immediately handle it.
    // This is to ensure that init will always and immediately shutdown/reboot, regardless of
    // whether there are other pending events to process or whether init is waiting on an exec
    // service or waiting on a property.
    // In the non-thermal-shutdown case, the 'shutdown' trigger will be fired to let device
    // specific commands be executed.
    if (name == "sys.powerctl") {
        trigger_shutdown(value);
    }

    if (property_triggers_enabled) {
        ActionManager::GetInstance().QueuePropertyChange(name, value);
        WakeMainInitThread();
    }

    prop_waiter_state.CheckAndResetWait(name, value);
}

static std::optional<boot_clock::time_point> HandleProcessActions() {
    std::optional<boot_clock::time_point> next_process_action_time;
    for (const auto& s : ServiceList::GetInstance()) {
        if ((s->flags() & SVC_RUNNING) && s->timeout_period()) {
            auto timeout_time = s->time_started() + *s->timeout_period();
            if (boot_clock::now() > timeout_time) {
                s->Timeout();
            } else {
                if (!next_process_action_time || timeout_time < *next_process_action_time) {
                    next_process_action_time = timeout_time;
                }
            }
        }

        if (!(s->flags() & SVC_RESTARTING)) continue;

        auto restart_time = s->time_started() + s->restart_period();
        if (boot_clock::now() > restart_time) {
            if (auto result = s->Start(); !result.ok()) {
                LOG(ERROR) << "Could not restart process '" << s->name() << "': " << result.error();
            }
        } else {
            if (!next_process_action_time || restart_time < *next_process_action_time) {
                next_process_action_time = restart_time;
            }
        }
    }
    return next_process_action_time;
}
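
// The returned time point feeds the main loop's epoll wait. A hedged sketch of the intended
// use, assuming a caller shaped like init's main loop:
//
//   auto next_action_time = HandleProcessActions();
//   std::optional<std::chrono::milliseconds> epoll_timeout;
//   if (next_action_time) {
//       epoll_timeout = std::chrono::ceil<std::chrono::milliseconds>(
//               *next_action_time - boot_clock::now());
//   }
//   epoll.Wait(epoll_timeout);  // wakes in time to restart or time out the next service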

static Result<void> DoControlStart(Service* service) {
    return service->Start();
}

static Result<void> DoControlStop(Service* service) {
    service->Stop();
    return {};
}

static Result<void> DoControlRestart(Service* service) {
    service->Restart();
    return {};
}

int StopServicesFromApex(const std::string& apex_name) {
    auto services = ServiceList::GetInstance().FindServicesByApexName(apex_name);
    if (services.empty()) {
        LOG(INFO) << "No service found for APEX: " << apex_name;
        return 0;
    }
    std::set<std::string> service_names;
    for (const auto& service : services) {
        service_names.emplace(service->name());
    }
    constexpr std::chrono::milliseconds kServiceStopTimeout = 10s;
    int still_running = StopServicesAndLogViolations(service_names, kServiceStopTimeout,
                                                     true /*SIGTERM*/);
    // Send SIGKILL to ones that didn't terminate cleanly.
    if (still_running > 0) {
        still_running = StopServicesAndLogViolations(service_names, 0ms, false /*SIGKILL*/);
    }
    return still_running;
}

void RemoveServiceAndActionFromApex(const std::string& apex_name) {
    // Remove services and actions that match the apex name
    ActionManager::GetInstance().RemoveActionIf([&](const std::unique_ptr<Action>& action) -> bool {
        return GetApexNameFromFileName(action->filename()) == apex_name;
    });
    ServiceList::GetInstance().RemoveServiceIf([&](const std::unique_ptr<Service>& s) -> bool {
        return GetApexNameFromFileName(s->filename()) == apex_name;
    });
}

static Result<void> DoUnloadApex(const std::string& apex_name) {
    if (StopServicesFromApex(apex_name) > 0) {
        return Error() << "Unable to stop all services from " << apex_name;
    }
    RemoveServiceAndActionFromApex(apex_name);
    return {};
}

static Result<void> UpdateApexLinkerConfig(const std::string& apex_name) {
    // Do not invoke linkerconfig when there's no bin/ in the apex.
    const std::string bin_path = "/apex/" + apex_name + "/bin";
    if (access(bin_path.c_str(), R_OK) != 0) {
        return {};
    }
    const char* linkerconfig_binary = "/apex/com.android.runtime/bin/linkerconfig";
    const char* linkerconfig_target = "/linkerconfig";
    const char* arguments[] = {linkerconfig_binary, "--target", linkerconfig_target, "--apex",
                               apex_name.c_str(), "--strict"};

    if (logwrap_fork_execvp(arraysize(arguments), arguments, nullptr, false, LOG_KLOG, false,
                            nullptr) != 0) {
        return ErrnoError() << "failed to execute linkerconfig";
    }
    LOG(INFO) << "Generated linker configuration for " << apex_name;
    return {};
}
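
// The fork/exec above is equivalent to running, with output redirected to the kernel log:
//
//   /apex/com.android.runtime/bin/linkerconfig --target /linkerconfig \
//       --apex <apex_name> --strict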

static Result<void> DoLoadApex(const std::string& apex_name) {
    if (auto result = ParseApexConfigs(apex_name); !result.ok()) {
        return result.error();
    }

    if (auto result = UpdateApexLinkerConfig(apex_name); !result.ok()) {
        return result.error();
    }

    return {};
}

enum class ControlTarget {
    SERVICE,    // function gets called for the named service
    INTERFACE,  // action gets called for every service that holds this interface
};

using ControlMessageFunction = std::function<Result<void>(Service*)>;

static const std::map<std::string, ControlMessageFunction, std::less<>>& GetControlMessageMap() {
    // clang-format off
    static const std::map<std::string, ControlMessageFunction, std::less<>> control_message_functions = {
        {"sigstop_on",  [](auto* service) { service->set_sigstop(true); return Result<void>{}; }},
        {"sigstop_off", [](auto* service) { service->set_sigstop(false); return Result<void>{}; }},
        {"oneshot_on",  [](auto* service) { service->set_oneshot(true); return Result<void>{}; }},
        {"oneshot_off", [](auto* service) { service->set_oneshot(false); return Result<void>{}; }},
        {"start",       DoControlStart},
        {"stop",        DoControlStop},
        {"restart",     DoControlRestart},
    };
    // clang-format on

    return control_message_functions;
}
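
// The keys above are the suffixes of the ctl.* system properties. A hedged sketch of how a
// suitably privileged process exercises this map (the property service routes the write back
// to init as a control message):
//
//   android::base::SetProperty("ctl.restart", "zygote");  // -> DoControlRestart on "zygote"
//   android::base::SetProperty("ctl.oneshot_on", "mediaextractor");  // flips the oneshot flag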

static Result<void> HandleApexControlMessage(std::string_view action, const std::string& name,
                                             std::string_view message) {
    if (action == "load") {
        return DoLoadApex(name);
    } else if (action == "unload") {
        return DoUnloadApex(name);
    } else {
        return Error() << "Unknown control msg '" << message << "'";
    }
}
|
|
|
|
|
2020-03-04 19:52:08 +01:00
|
|
|
static bool HandleControlMessage(std::string_view message, const std::string& name,
|
|
|
|
pid_t from_pid) {
|
|
|
|
std::string cmdline_path = StringPrintf("proc/%d/cmdline", from_pid);
|
2018-02-21 19:37:44 +01:00
|
|
|
std::string process_cmdline;
|
|
|
|
if (ReadFileToString(cmdline_path, &process_cmdline)) {
|
|
|
|
std::replace(process_cmdline.begin(), process_cmdline.end(), '\0', ' ');
|
|
|
|
process_cmdline = Trim(process_cmdline);
|
|
|
|
} else {
|
|
|
|
process_cmdline = "unknown process";
|
|
|
|
}
|
|
|
|
|
2020-03-04 19:52:08 +01:00
|
|
|
auto action = message;
|
2022-07-07 08:25:02 +02:00
|
|
|
if (ConsumePrefix(&action, "apex_")) {
|
2022-07-12 07:23:37 +02:00
|
|
|
if (auto result = HandleApexControlMessage(action, name, message); !result.ok()) {
|
|
|
|
LOG(ERROR) << "Control message: Could not ctl." << message << " for '" << name
|
|
|
|
<< "' from pid: " << from_pid << " (" << process_cmdline
|
|
|
|
<< "): " << result.error();
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
LOG(INFO) << "Control message: Processed ctl." << message << " for '" << name
|
|
|
|
<< "' from pid: " << from_pid << " (" << process_cmdline << ")";
|
|
|
|
return true;
|
2022-07-07 08:25:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
Service* service = nullptr;
|
2020-03-04 19:52:08 +01:00
|
|
|
if (ConsumePrefix(&action, "interface_")) {
|
|
|
|
service = ServiceList::GetInstance().FindInterface(name);
|
|
|
|
} else {
|
|
|
|
service = ServiceList::GetInstance().FindService(name);
|
|
|
|
}
|
2018-05-08 22:46:39 +02:00
|
|
|
|
2020-03-04 19:52:08 +01:00
|
|
|
if (service == nullptr) {
|
|
|
|
LOG(ERROR) << "Control message: Could not find '" << name << "' for ctl." << message
|
|
|
|
<< " from pid: " << from_pid << " (" << process_cmdline << ")";
|
|
|
|
return false;
|
2017-10-06 03:50:22 +02:00
|
|
|
}
|
|
|
|
|
2020-03-04 19:52:08 +01:00
|
|
|
const auto& map = GetControlMessageMap();
|
|
|
|
const auto it = map.find(action);
|
|
|
|
if (it == map.end()) {
|
|
|
|
LOG(ERROR) << "Unknown control msg '" << message << "'";
|
2019-05-23 21:13:29 +02:00
|
|
|
return false;
|
2008-10-21 16:00:00 +02:00
|
|
|
}
|
2020-03-04 19:52:08 +01:00
|
|
|
const auto& function = it->second;
|
2017-10-06 03:50:22 +02:00
|
|
|
|
2020-03-04 19:52:08 +01:00
|
|
|
if (auto result = function(service); !result.ok()) {
|
|
|
|
LOG(ERROR) << "Control message: Could not ctl." << message << " for '" << name
|
|
|
|
<< "' from pid: " << from_pid << " (" << process_cmdline
|
|
|
|
<< "): " << result.error();
|
2019-05-23 21:13:29 +02:00
|
|
|
return false;
|
2018-05-08 22:46:39 +02:00
|
|
|
}
|
2019-08-29 03:34:24 +02:00
|
|
|
|
2020-03-04 19:52:08 +01:00
|
|
|
LOG(INFO) << "Control message: Processed ctl." << message << " for '" << name
|
|
|
|
<< "' from pid: " << from_pid << " (" << process_cmdline << ")";
|
2019-05-23 21:13:29 +02:00
|
|
|
return true;
|
2008-10-21 16:00:00 +02:00
|
|
|
}
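// To summarize the dispatch above: "ctl.interface_<action>" messages look the target up by the
// interface it serves (e.g. hwservicemanager restarting a HAL by interface name), while plain
// "ctl.<action>" messages look it up by service name.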
|
|
|
|
|
2020-03-12 22:29:25 +01:00
|
|
|
bool QueueControlMessage(const std::string& message, const std::string& name, pid_t pid, int fd) {
|
|
|
|
auto lock = std::lock_guard{pending_control_messages_lock};
|
|
|
|
if (pending_control_messages.size() > 100) {
|
|
|
|
LOG(ERROR) << "Too many pending control messages, dropped '" << message << "' for '" << name
|
|
|
|
<< "' from pid: " << pid;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
pending_control_messages.push({message, name, pid, fd});
|
2020-03-27 04:32:17 +01:00
|
|
|
WakeMainInitThread();
|
2020-03-12 22:29:25 +01:00
|
|
|
return true;
|
|
|
|
}
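// The queue is deliberately bounded (100 entries above) so that a flood of ctl.* messages cannot
// grow init's memory without limit. On overflow the message is dropped, QueueControlMessage()
// returns false, and the sender is expected to observe a property-set failure.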
|
|
|
|
|
|
|
|
static void HandleControlMessages() {
|
|
|
|
auto lock = std::unique_lock{pending_control_messages_lock};
|
|
|
|
// Init historically would only handle one property message, including control messages,
|
|
|
|
// in each iteration of its main loop. We retain this behavior here to prevent starvation of
|
|
|
|
// other actions in the main loop.
|
|
|
|
if (!pending_control_messages.empty()) {
|
|
|
|
auto control_message = pending_control_messages.front();
|
|
|
|
pending_control_messages.pop();
|
|
|
|
lock.unlock();
|
|
|
|
|
|
|
|
bool success = HandleControlMessage(control_message.message, control_message.name,
|
|
|
|
control_message.pid);
|
|
|
|
|
|
|
|
uint32_t response = success ? PROP_SUCCESS : PROP_ERROR_HANDLE_CONTROL_MESSAGE;
|
|
|
|
if (control_message.fd != -1) {
|
|
|
|
TEMP_FAILURE_RETRY(send(control_message.fd, &response, sizeof(response), 0));
|
|
|
|
close(control_message.fd);
|
|
|
|
}
|
|
|
|
lock.lock();
|
|
|
|
}
|
|
|
|
// If we still have items to process, make sure we wake back up to do so.
|
|
|
|
if (!pending_control_messages.empty()) {
|
2020-03-27 04:32:17 +01:00
|
|
|
WakeMainInitThread();
|
2020-03-12 22:29:25 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-10 20:08:01 +02:00
|
|
|
static Result<void> wait_for_coldboot_done_action(const BuiltinArguments& args) {
|
2020-03-12 22:29:25 +01:00
|
|
|
if (!prop_waiter_state.StartWaiting(kColdBootDoneProp, "true")) {
|
2019-06-11 02:49:59 +02:00
|
|
|
LOG(FATAL) << "Could not wait for '" << kColdBootDoneProp << "'";
|
2015-04-25 03:50:30 +02:00
|
|
|
}
|
|
|
|
|
2019-06-10 20:08:01 +02:00
|
|
|
return {};
|
2010-04-14 04:52:01 +02:00
|
|
|
}
|
|
|
|
|
2019-06-10 20:08:01 +02:00
|
|
|
static Result<void> SetupCgroupsAction(const BuiltinArguments&) {
|
2022-10-20 15:14:39 +02:00
|
|
|
if (!CgroupsAvailable()) {
|
|
|
|
LOG(INFO) << "Cgroups support in kernel is not enabled";
|
|
|
|
return {};
|
|
|
|
}
|
2018-12-21 20:41:50 +01:00
|
|
|
// <CGROUPS_RC_DIR> has to be created using the make_dir() function
|
|
|
|
// so that the appropriate sepolicy context is applied to it.
|
2019-03-26 21:34:32 +01:00
|
|
|
make_dir(android::base::Dirname(CGROUPS_RC_PATH), 0711);
|
2019-04-03 00:10:40 +02:00
|
|
|
if (!CgroupSetup()) {
|
2018-12-21 20:41:50 +01:00
|
|
|
return ErrnoError() << "Failed to setup cgroups";
|
|
|
|
}
|
|
|
|
|
2019-06-10 20:08:01 +02:00
|
|
|
return {};
|
2018-12-21 20:41:50 +01:00
|
|
|
}
|
|
|
|
|
2015-12-07 13:33:58 +01:00
|
|
|
static void export_oem_lock_status() {
|
2017-03-29 01:40:41 +02:00
|
|
|
if (!android::base::GetBoolProperty("ro.oem_unlock_supported", false)) {
|
2015-12-07 13:33:58 +01:00
|
|
|
return;
|
|
|
|
}
|
2021-03-09 22:49:59 +01:00
|
|
|
SetProperty(
|
|
|
|
"ro.boot.flash.locked",
|
|
|
|
android::base::GetProperty("ro.boot.verifiedbootstate", "") == "orange" ? "0" : "1");
|
2010-04-14 04:52:01 +02:00
|
|
|
}
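// In other words, ro.boot.flash.locked is "0" exactly when the verified boot state is "orange"
// (bootloader unlocked) and "1" otherwise.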
|
|
|
|
|
2019-06-10 20:08:01 +02:00
|
|
|
static Result<void> property_enable_triggers_action(const BuiltinArguments& args) {
|
2016-11-26 14:14:07 +01:00
|
|
|
/* Enable property triggers. */
|
|
|
|
property_triggers_enabled = 1;
|
2019-06-10 20:08:01 +02:00
|
|
|
return {};
|
2016-11-26 14:14:07 +01:00
|
|
|
}
|
|
|
|
|
2019-06-10 20:08:01 +02:00
|
|
|
static Result<void> queue_property_triggers_action(const BuiltinArguments& args) {
|
2016-11-26 14:14:07 +01:00
|
|
|
ActionManager::GetInstance().QueueBuiltinAction(property_enable_triggers_action,
                                                "enable_property_trigger");
|
2017-04-17 22:25:29 +02:00
|
|
|
ActionManager::GetInstance().QueueAllPropertyActions();
|
2019-06-10 20:08:01 +02:00
|
|
|
return {};
|
2010-04-14 04:52:01 +02:00
|
|
|
}
|
|
|
|
|
2016-06-29 15:30:00 +02:00
|
|
|
// Set the UDC controller for the ConfigFS USB Gadgets.
|
|
|
|
// Read the UDC controller in use from "/sys/class/udc".
|
|
|
|
// If there are multiple UDC controllers, select the first one.
|
2020-05-13 03:13:14 +02:00
|
|
|
static void SetUsbController() {
|
|
|
|
static auto controller_set = false;
|
|
|
|
if (controller_set) return;
|
2016-06-29 15:30:00 +02:00
|
|
|
std::unique_ptr<DIR, decltype(&closedir)> dir(opendir("/sys/class/udc"), closedir);
|
|
|
|
if (!dir) return;
|
|
|
|
|
|
|
|
dirent* dp;
|
|
|
|
while ((dp = readdir(dir.get())) != nullptr) {
|
|
|
|
if (dp->d_name[0] == '.') continue;
|
|
|
|
|
2019-08-20 00:21:25 +02:00
|
|
|
SetProperty("sys.usb.controller", dp->d_name);
|
2020-05-13 03:13:14 +02:00
|
|
|
controller_set = true;
|
2016-06-29 15:30:00 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
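// Illustrative example (the device name is hypothetical): with /sys/class/udc/a600000.dwc3 as the
// only entry, sys.usb.controller is set to "a600000.dwc3" on the first call, and the function is
// a no-op on every call thereafter.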
|
|
|
|
|
2021-09-29 14:14:35 +02:00
|
|
|
/// Set ro.kernel.version property to contain the major.minor pair as returned
|
|
|
|
/// by uname(2).
|
|
|
|
static void SetKernelVersion() {
|
|
|
|
struct utsname uts;
|
|
|
|
unsigned int major, minor;
|
|
|
|
|
|
|
|
if ((uname(&uts) != 0) || (sscanf(uts.release, "%u.%u", &major, &minor) != 2)) {
|
|
|
|
LOG(ERROR) << "Could not parse the kernel version from uname";
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
SetProperty("ro.kernel.version", android::base::StringPrintf("%u.%u", major, minor));
|
|
|
|
}
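// For example, if uname(2) reports a release of "5.10.110-android12-9", only the leading "5.10"
// is parsed, and ro.kernel.version becomes "5.10".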
|
|
|
|
|
2018-04-12 03:46:38 +02:00
|
|
|
static void HandleSigtermSignal(const signalfd_siginfo& siginfo) {
|
2017-09-06 22:43:57 +02:00
|
|
|
if (siginfo.ssi_pid != 0) {
|
|
|
|
// Drop any userspace SIGTERM requests.
|
|
|
|
LOG(DEBUG) << "Ignoring SIGTERM from pid " << siginfo.ssi_pid;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-09-26 17:26:12 +02:00
|
|
|
HandlePowerctlMessage("shutdown,container");
|
2017-09-06 22:43:57 +02:00
|
|
|
}
|
|
|
|
|
2022-11-29 21:56:34 +01:00
|
|
|
static void HandleSignalFd() {
|
2018-04-12 03:46:38 +02:00
|
|
|
signalfd_siginfo siginfo;
|
2022-11-29 21:56:34 +01:00
|
|
|
ssize_t bytes_read = TEMP_FAILURE_RETRY(read(signal_fd, &siginfo, sizeof(siginfo)));
|
|
|
|
if (bytes_read != sizeof(siginfo)) {
|
|
|
|
PLOG(ERROR) << "Failed to read siginfo from signal_fd";
|
|
|
|
return;
|
|
|
|
}
|
2018-04-12 03:46:38 +02:00
|
|
|
|
|
|
|
switch (siginfo.ssi_signo) {
|
|
|
|
case SIGCHLD:
|
|
|
|
ReapAnyOutstandingChildren();
|
|
|
|
break;
|
|
|
|
case SIGTERM:
|
|
|
|
HandleSigtermSignal(siginfo);
|
|
|
|
break;
|
|
|
|
default:
|
2022-10-26 02:07:36 +02:00
|
|
|
LOG(ERROR) << "signal_fd: received unexpected signal " << siginfo.ssi_signo;
|
2018-04-12 03:46:38 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
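// Note that a signalfd read coalesces multiple pending instances of the same signal into one
// siginfo, which is why the SIGCHLD case calls ReapAnyOutstandingChildren() to reap every exited
// child rather than assuming a single child death per wakeup.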
|
|
|
|
|
|
|
|
static void UnblockSignals() {
|
|
|
|
const struct sigaction act { .sa_handler = SIG_DFL };
|
|
|
|
sigaction(SIGCHLD, &act, nullptr);
|
|
|
|
|
2018-02-02 02:14:30 +01:00
|
|
|
sigset_t mask;
|
|
|
|
sigemptyset(&mask);
|
2018-04-12 03:46:38 +02:00
|
|
|
sigaddset(&mask, SIGCHLD);
|
2018-02-02 02:14:30 +01:00
|
|
|
sigaddset(&mask, SIGTERM);
|
|
|
|
|
|
|
|
if (sigprocmask(SIG_UNBLOCK, &mask, nullptr) == -1) {
|
2018-04-12 03:46:38 +02:00
|
|
|
PLOG(FATAL) << "failed to unblock signals for PID " << getpid();
|
2018-02-02 02:14:30 +01:00
|
|
|
}
|
|
|
|
}
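// The signal mask is preserved across both fork() and execve(), so without this hook every
// service spawned by init would start with SIGCHLD (and possibly SIGTERM) blocked; the
// pthread_atfork() registration below runs UnblockSignals() in each child to undo that.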
|
|
|
|
|
2015-10-25 01:20:18 +02:00
|
|
|
static void InstallSignalFdHandler(Epoll* epoll) {
|
2018-04-12 03:46:38 +02:00
|
|
|
// Applying SA_NOCLDSTOP to a defaulted SIGCHLD handler prevents the signalfd from receiving
|
|
|
|
// SIGCHLD when a child process stops or continues (b/77867680#comment9).
|
|
|
|
const struct sigaction act { .sa_handler = SIG_DFL, .sa_flags = SA_NOCLDSTOP };
|
|
|
|
sigaction(SIGCHLD, &act, nullptr);
|
|
|
|
|
2017-09-06 22:43:57 +02:00
|
|
|
sigset_t mask;
|
|
|
|
sigemptyset(&mask);
|
2018-04-12 03:46:38 +02:00
|
|
|
sigaddset(&mask, SIGCHLD);
|
|
|
|
|
|
|
|
if (!IsRebootCapable()) {
|
|
|
|
// If init does not have the CAP_SYS_BOOT capability, it is running in a container.
|
|
|
|
// In that case, receiving SIGTERM will cause the system to shut down.
|
|
|
|
sigaddset(&mask, SIGTERM);
|
|
|
|
}
|
2017-09-06 22:43:57 +02:00
|
|
|
|
|
|
|
if (sigprocmask(SIG_BLOCK, &mask, nullptr) == -1) {
|
2018-04-12 03:46:38 +02:00
|
|
|
PLOG(FATAL) << "failed to block signals";
|
2017-09-06 22:43:57 +02:00
|
|
|
}
|
|
|
|
|
2018-04-12 03:46:38 +02:00
|
|
|
// Register a handler to unblock signals in the child processes.
|
|
|
|
const int result = pthread_atfork(nullptr, nullptr, &UnblockSignals);
|
2018-02-02 02:14:30 +01:00
|
|
|
if (result != 0) {
|
|
|
|
LOG(FATAL) << "Failed to register a fork handler: " << strerror(result);
|
|
|
|
}
|
|
|
|
|
2022-11-29 21:56:34 +01:00
|
|
|
signal_fd = signalfd(-1, &mask, SFD_CLOEXEC);
|
2018-04-12 03:46:38 +02:00
|
|
|
if (signal_fd == -1) {
|
|
|
|
PLOG(FATAL) << "failed to create signalfd";
|
2017-09-06 22:43:57 +02:00
|
|
|
}
|
|
|
|
|
2022-03-08 04:10:57 +01:00
|
|
|
constexpr int flags = EPOLLIN | EPOLLPRI;
|
2022-11-29 21:56:34 +01:00
|
|
|
if (auto result = epoll->RegisterHandler(signal_fd, HandleSignalFd, flags); !result.ok()) {
|
2015-10-25 01:20:18 +02:00
|
|
|
LOG(FATAL) << result.error();
|
|
|
|
}
|
2017-09-06 22:43:57 +02:00
|
|
|
}
|
|
|
|
|
2018-05-19 00:25:15 +02:00
|
|
|
void HandleKeychord(const std::vector<int>& keycodes) {
|
2018-05-17 00:10:24 +02:00
|
|
|
// Only handle keychords if adb is enabled.
|
|
|
|
std::string adb_enabled = android::base::GetProperty("init.svc.adbd", "");
|
2018-05-19 00:25:15 +02:00
|
|
|
if (adb_enabled != "running") {
|
|
|
|
LOG(WARNING) << "Not starting service for keychord " << android::base::Join(keycodes, ' ')
|
|
|
|
<< " because ADB is disabled";
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
auto found = false;
|
|
|
|
for (const auto& service : ServiceList::GetInstance()) {
|
|
|
|
auto svc = service.get();
|
|
|
|
if (svc->keycodes() == keycodes) {
|
|
|
|
found = true;
|
|
|
|
LOG(INFO) << "Starting service '" << svc->name() << "' from keychord "
|
|
|
|
<< android::base::Join(keycodes, ' ');
|
2020-02-05 19:49:33 +01:00
|
|
|
if (auto result = svc->Start(); !result.ok()) {
|
2018-05-19 00:25:15 +02:00
|
|
|
LOG(ERROR) << "Could not start service '" << svc->name() << "' from keychord "
|
|
|
|
<< android::base::Join(keycodes, ' ') << ": " << result.error();
|
2018-05-17 00:10:24 +02:00
|
|
|
}
|
|
|
|
}
|
2018-05-19 00:25:15 +02:00
|
|
|
}
|
|
|
|
if (!found) {
|
|
|
|
LOG(ERROR) << "Service for keychord " << android::base::Join(keycodes, ' ') << " not found";
|
2018-05-17 00:10:24 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-11 17:57:24 +02:00
|
|
|
static void UmountDebugRamdisk() {
|
|
|
|
if (umount("/debug_ramdisk") != 0) {
|
2019-11-22 08:14:10 +01:00
|
|
|
PLOG(ERROR) << "Failed to umount /debug_ramdisk";
|
2019-04-11 17:57:24 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-07 01:58:19 +02:00
|
|
|
static void UmountSecondStageRes() {
|
|
|
|
if (umount(kSecondStageRes) != 0) {
|
|
|
|
PLOG(ERROR) << "Failed to umount " << kSecondStageRes;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-22 08:14:10 +01:00
|
|
|
static void MountExtraFilesystems() {
|
|
|
|
#define CHECKCALL(x) \
|
|
|
|
if ((x) != 0) PLOG(FATAL) << #x " failed.";
|
|
|
|
|
|
|
|
// /apex is used to mount APEXes
|
|
|
|
CHECKCALL(mount("tmpfs", "/apex", "tmpfs", MS_NOEXEC | MS_NOSUID | MS_NODEV,
|
|
|
|
"mode=0755,uid=0,gid=0"));
|
|
|
|
|
|
|
|
// /linkerconfig is used to keep generated linker configuration
|
|
|
|
CHECKCALL(mount("tmpfs", "/linkerconfig", "tmpfs", MS_NOEXEC | MS_NOSUID | MS_NODEV,
|
|
|
|
"mode=0755,uid=0,gid=0"));
|
|
|
|
#undef CHECKCALL
|
|
|
|
}
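// Both mounts are plain tmpfs instances with MS_NOEXEC | MS_NOSUID | MS_NODEV; the actual APEX
// images and the generated linker configuration are mounted or written on top of these staging
// points later in boot.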
|
|
|
|
|
2019-03-27 16:10:41 +01:00
|
|
|
static void RecordStageBoottimes(const boot_clock::time_point& second_stage_start_time) {
|
|
|
|
int64_t first_stage_start_time_ns = -1;
|
2019-05-08 21:44:50 +02:00
|
|
|
if (auto first_stage_start_time_str = getenv(kEnvFirstStageStartedAt);
|
2019-03-27 16:10:41 +01:00
|
|
|
first_stage_start_time_str) {
|
2019-08-20 00:21:25 +02:00
|
|
|
SetProperty("ro.boottime.init", first_stage_start_time_str);
|
2019-03-27 16:10:41 +01:00
|
|
|
android::base::ParseInt(first_stage_start_time_str, &first_stage_start_time_ns);
|
|
|
|
}
|
2019-05-08 21:44:50 +02:00
|
|
|
unsetenv(kEnvFirstStageStartedAt);
|
2019-03-27 16:10:41 +01:00
|
|
|
|
|
|
|
int64_t selinux_start_time_ns = -1;
|
2019-05-08 21:44:50 +02:00
|
|
|
if (auto selinux_start_time_str = getenv(kEnvSelinuxStartedAt); selinux_start_time_str) {
|
2019-03-27 16:10:41 +01:00
|
|
|
android::base::ParseInt(selinux_start_time_str, &selinux_start_time_ns);
|
|
|
|
}
|
2019-05-08 21:44:50 +02:00
|
|
|
unsetenv(kEnvSelinuxStartedAt);
|
2019-03-27 16:10:41 +01:00
|
|
|
|
|
|
|
if (selinux_start_time_ns == -1) return;
|
|
|
|
if (first_stage_start_time_ns == -1) return;
|
|
|
|
|
2019-08-20 00:21:25 +02:00
|
|
|
SetProperty("ro.boottime.init.first_stage",
|
|
|
|
std::to_string(selinux_start_time_ns - first_stage_start_time_ns));
|
|
|
|
SetProperty("ro.boottime.init.selinux",
|
|
|
|
std::to_string(second_stage_start_time.time_since_epoch().count() -
|
|
|
|
selinux_start_time_ns));
|
2021-02-03 11:22:21 +01:00
|
|
|
if (auto init_module_time_str = getenv(kEnvInitModuleDurationMs); init_module_time_str) {
|
|
|
|
SetProperty("ro.boottime.init.modules", init_module_time_str);
|
|
|
|
unsetenv(kEnvInitModuleDurationMs);
|
|
|
|
}
|
2019-03-27 16:10:41 +01:00
|
|
|
}
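// The net effect, assuming first stage init exported both environment variables, is a set of
// ro.boottime.init* properties expressed in nanoseconds (plus, when available,
// ro.boottime.init.modules in milliseconds, as its env var name suggests).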
|
|
|
|
|
2019-04-23 02:46:37 +02:00
|
|
|
void SendLoadPersistentPropertiesMessage() {
|
|
|
|
auto init_message = InitMessage{};
|
|
|
|
init_message.set_load_persistent_properties(true);
|
2020-02-05 19:49:33 +01:00
|
|
|
if (auto result = SendMessage(property_fd, init_message); !result.ok()) {
|
2019-04-23 02:46:37 +02:00
|
|
|
LOG(ERROR) << "Failed to send load persistent properties message: " << result.error();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-22 06:53:28 +02:00
|
|
|
static Result<void> ConnectEarlyStageSnapuserdAction(const BuiltinArguments& args) {
|
|
|
|
auto pid = GetSnapuserdFirstStagePid();
|
|
|
|
if (!pid) {
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
|
|
|
auto info = GetSnapuserdFirstStageInfo();
|
|
|
|
if (auto iter = std::find(info.begin(), info.end(), "socket"s); iter == info.end()) {
|
|
|
|
// snapuserd does not support socket handoff, so exit early.
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
|
|
|
// Socket handoff is supported.
|
|
|
|
auto svc = ServiceList::GetInstance().FindService("snapuserd");
|
|
|
|
if (!svc) {
|
|
|
|
LOG(FATAL) << "Failed to find snapuserd service entry";
|
|
|
|
}
|
|
|
|
|
|
|
|
svc->SetShutdownCritical();
|
|
|
|
svc->SetStartedInFirstStage(*pid);
|
|
|
|
|
|
|
|
svc = ServiceList::GetInstance().FindService("snapuserd_proxy");
|
|
|
|
if (!svc) {
|
|
|
|
LOG(FATAL) << "Failed find snapuserd_proxy service entry, merge will never initiate";
|
|
|
|
}
|
|
|
|
if (!svc->MarkSocketPersistent("snapuserd")) {
|
|
|
|
LOG(FATAL) << "Could not find snapuserd socket in snapuserd_proxy service entry";
|
|
|
|
}
|
|
|
|
if (auto result = svc->Start(); !result.ok()) {
|
|
|
|
LOG(FATAL) << "Could not start snapuserd_proxy: " << result.error();
|
|
|
|
}
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
2018-11-06 23:12:05 +01:00
|
|
|
int SecondStageMain(int argc, char** argv) {
|
2018-08-03 22:36:18 +02:00
|
|
|
if (REBOOT_BOOTLOADER_ON_PANIC) {
|
|
|
|
InstallRebootSignalHandlers();
|
init: cleanup is_first_stage conditionals
A recent change to the is_first_stage conditionals created a unneeded
else { } block as both the code in the else { } block and any code
that runs after it are both in the second stage of init. A first step
to clean this up is to remove this else block.
Secondly, given the above confusion, it makes sense to simplify the two
if (is_first_stage) conditions into one, which only now requires
duplicating one line to initialize logging and the actual "init
first/second stage started!" logs.
Lastly, there are a few commands ran at the beginning of both init
stages that do not need to be,
* boot_clock::time_point start_time = boot_clock::now();
This is only used in the first stage so keep it there
* umask(0);
umasks are preserved across execve() so it only needs to be set in the
first stage
* chmod("/proc/cmdline", 0440);
This needs to be moved until after /proc is mounted in the first
stage, but otherwise only needs to be done once
Test: Boot bullhead, check umask, check cmdline permissions, check
boot time property
Change-Id: Idb7df1d4330960ce282d9609f5c62281ee2638b9
2017-03-17 02:08:56 +01:00
|
|
|
}
|
2011-12-19 20:21:32 +01:00
|
|
|
|
2022-04-12 23:59:05 +02:00
|
|
|
// No threads should be spun up until signalfd
|
|
|
|
// is registered. If the threads are indeed required,
|
|
|
|
// each of these threads _should_ make sure the SIGCHLD signal
|
|
|
|
// is blocked. See b/223076262
|
2019-03-27 16:10:41 +01:00
|
|
|
boot_clock::time_point start_time = boot_clock::now();
|
|
|
|
|
2020-03-12 22:29:25 +01:00
|
|
|
trigger_shutdown = [](const std::string& command) { shutdown_state.TriggerShutdown(command); };
|
2019-11-13 01:21:20 +01:00
|
|
|
|
2019-05-28 19:19:44 +02:00
|
|
|
SetStdioToDevNull(argv);
|
2020-12-10 16:34:29 +01:00
|
|
|
InitKernelLogging(argv);
|
2017-03-17 02:08:56 +01:00
|
|
|
LOG(INFO) << "init second stage started!";
|
2015-04-26 02:42:52 +02:00
|
|
|
|
2022-12-20 15:08:43 +01:00
|
|
|
SelinuxSetupKernelLogging();
|
|
|
|
|
2020-07-23 18:13:37 +02:00
|
|
|
// Update $PATH in case the second stage init is newer than the first stage init, where it is
|
|
|
|
// first set.
|
|
|
|
if (setenv("PATH", _PATH_DEFPATH, 1) != 0) {
|
|
|
|
PLOG(FATAL) << "Could not set $PATH to '" << _PATH_DEFPATH << "' in second stage";
|
|
|
|
}
|
|
|
|
|
2020-03-16 18:17:05 +01:00
|
|
|
// Init should not crash because of a dependence on any other process, therefore we ignore
|
|
|
|
// SIGPIPE and handle EPIPE at the call site directly. Note that setting a signal to SIG_IGN
|
|
|
|
// is inherited across exec, but custom signal handlers are not. Since we do not want to
|
|
|
|
// ignore SIGPIPE for child processes, we set a no-op function for the signal handler instead.
|
|
|
|
{
|
|
|
|
struct sigaction action = {.sa_flags = SA_RESTART};
|
|
|
|
action.sa_handler = [](int) {};
|
|
|
|
sigaction(SIGPIPE, &action, nullptr);
|
|
|
|
}
|
2019-10-23 02:18:42 +02:00
|
|
|
|
2019-04-18 23:56:24 +02:00
|
|
|
// Set init and its forked children's oom_adj.
|
2019-10-23 02:18:42 +02:00
|
|
|
if (auto result =
|
|
|
|
WriteFile("/proc/1/oom_score_adj", StringPrintf("%d", DEFAULT_OOM_SCORE_ADJUST));
|
2020-02-05 19:49:33 +01:00
|
|
|
!result.ok()) {
|
2019-10-23 02:18:42 +02:00
|
|
|
LOG(ERROR) << "Unable to write " << DEFAULT_OOM_SCORE_ADJUST
|
|
|
|
<< " to /proc/1/oom_score_adj: " << result.error();
|
2019-04-18 23:56:24 +02:00
|
|
|
}
|
|
|
|
|
2017-05-02 23:44:39 +02:00
|
|
|
// Set up a session keyring that all processes will have access to. It
|
|
|
|
// will hold things like FBE encryption keys. No process should override
|
|
|
|
// its session keyring.
|
2017-05-10 02:09:06 +02:00
|
|
|
keyctl_get_keyring_ID(KEY_SPEC_SESSION_KEYRING, 1);
|
2017-05-02 23:44:39 +02:00
|
|
|
|
2017-03-17 02:08:56 +01:00
|
|
|
// Indicate that booting is in progress to background fw loaders, etc.
|
|
|
|
close(open("/dev/.booting", O_WRONLY | O_CREAT | O_CLOEXEC, 0000));
|
2015-02-28 15:39:11 +01:00
|
|
|
|
2019-03-04 10:53:34 +01:00
|
|
|
// See if we need to load debug props to allow adb root, when the device is unlocked.
|
|
|
|
const char* force_debuggable_env = getenv("INIT_FORCE_DEBUGGABLE");
|
2019-08-20 00:21:25 +02:00
|
|
|
bool load_debug_prop = false;
|
2019-03-04 10:53:34 +01:00
|
|
|
if (force_debuggable_env && AvbHandle::IsDeviceUnlocked()) {
|
|
|
|
load_debug_prop = "true"s == force_debuggable_env;
|
|
|
|
}
|
|
|
|
unsetenv("INIT_FORCE_DEBUGGABLE");
|
2017-03-17 02:08:56 +01:00
|
|
|
|
2019-08-20 00:21:25 +02:00
|
|
|
// Umount the debug ramdisk so property service doesn't read .prop files from there, when it
|
|
|
|
// is not meant to.
|
|
|
|
if (!load_debug_prop) {
|
|
|
|
UmountDebugRamdisk();
|
|
|
|
}
|
|
|
|
|
|
|
|
PropertyInit();
|
|
|
|
|
2020-10-07 01:58:19 +02:00
|
|
|
// Umount second stage resources after property service has read the .prop files.
|
|
|
|
UmountSecondStageRes();
|
|
|
|
|
2019-08-20 00:21:25 +02:00
|
|
|
// Umount the debug ramdisk after property service has read the .prop files when it means to.
|
|
|
|
if (load_debug_prop) {
|
|
|
|
UmountDebugRamdisk();
|
|
|
|
}
|
|
|
|
|
2019-11-22 08:14:10 +01:00
|
|
|
// Mount extra filesystems required during second stage init
|
|
|
|
MountExtraFilesystems();
|
|
|
|
|
2017-03-17 02:08:56 +01:00
|
|
|
// Now set up SELinux for second stage.
|
2017-08-10 21:22:44 +02:00
|
|
|
SelabelInitialize();
|
|
|
|
SelinuxRestoreContext();
|
2012-01-13 14:48:47 +01:00
|
|
|
|
2015-10-25 01:20:18 +02:00
|
|
|
Epoll epoll;
|
2020-02-05 19:49:33 +01:00
|
|
|
if (auto result = epoll.Open(); !result.ok()) {
|
2015-10-25 01:20:18 +02:00
|
|
|
PLOG(FATAL) << result.error();
|
2015-04-25 06:13:44 +02:00
|
|
|
}
|
|
|
|
|
2022-10-14 18:35:35 +02:00
|
|
|
// We always reap children before responding to the other pending functions. This is to
|
|
|
|
// prevent a race where other daemons see that a service has exited and ask init to
|
|
|
|
// start it again via ctl.start before init has reaped it.
|
|
|
|
epoll.SetFirstCallback(ReapAnyOutstandingChildren);
|
|
|
|
|
2015-10-25 01:20:18 +02:00
|
|
|
InstallSignalFdHandler(&epoll);
|
2020-03-12 22:29:25 +01:00
|
|
|
InstallInitNotifier(&epoll);
|
2019-04-23 02:46:37 +02:00
|
|
|
StartPropertyService(&property_fd);
|
|
|
|
|
2019-08-20 00:21:25 +02:00
|
|
|
// Make the time that init stages started available for bootstat to log.
|
|
|
|
RecordStageBoottimes(start_time);
|
|
|
|
|
|
|
|
// Set libavb version for Framework-only OTA match in Treble build.
|
|
|
|
if (const char* avb_version = getenv("INIT_AVB_VERSION"); avb_version != nullptr) {
|
|
|
|
SetProperty("ro.boot.avb_version", avb_version);
|
|
|
|
}
|
|
|
|
unsetenv("INIT_AVB_VERSION");
|
|
|
|
|
|
|
|
fs_mgr_vendor_overlay_mount_all();
|
|
|
|
export_oem_lock_status();
|
2019-03-13 18:18:24 +01:00
|
|
|
MountHandler mount_handler(&epoll);
|
2020-05-13 03:13:14 +02:00
|
|
|
SetUsbController();
|
2021-09-29 14:14:35 +02:00
|
|
|
SetKernelVersion();
|
2011-12-16 23:18:06 +01:00
|
|
|
|
2019-07-23 01:05:36 +02:00
|
|
|
const BuiltinFunctionMap& function_map = GetBuiltinFunctionMap();
|
2015-08-26 20:43:36 +02:00
|
|
|
Action::set_function_map(&function_map);
|
|
|
|
|
Proper mount namespace configuration for bionic
This CL fixes the design problem of the previous mechanism for providing
the bootstrap bionic and the runtime bionic to the same path.
Previously, bootstrap bionic was self-bind-mounted; i.e.
/system/bin/libc.so is bind-mounted to itself. And the runtime bionic
was bind-mounted on top of the bootstrap bionic. This has not only caused
problems like `adb sync` not working(b/122737045), but also is quite
difficult to understand due to the double-and-self mounting.
This is the new design:
Most importantly, these four are all distinct:
1) bootstrap bionic (/system/lib/bootstrap/libc.so)
2) runtime bionic (/apex/com.android.runtime/lib/bionic/libc.so)
3) mount point for 1) and 2) (/bionic/lib/libc.so)
4) symlink for 3) (/system/lib/libc.so -> /bionic/lib/libc.so)
Inside the mount namespace of the pre-apexd processes, 1) is
bind-mounted to 3). Likewise, inside the mount namespace of the
post-apexd processes, 2) is bind-mounted to 3). In other words, there is
no self-mount, and no double-mount.
Another change is that mount points are under /bionic and the legacy
paths become symlinks to the mount points. This is to make sure that
there is no bind mounts under /system, which is breaking some apps.
Finally, code for creating mount namespaces, mounting bionic, etc are
refactored to mount_namespace.cpp
Bug: 120266448
Bug: 123275379
Test: m, device boots, adb sync/push/pull works,
especially with following paths:
/bionic/lib64/libc.so
/bionic/bin/linker64
/system/lib64/bootstrap/libc.so
/system/bin/bootstrap/linker64
Change-Id: Icdfbdcc1efca540ac854d4df79e07ee61fca559f
2019-01-16 15:00:59 +01:00
|
|
|
if (!SetupMountNamespaces()) {
|
|
|
|
PLOG(FATAL) << "SetupMountNamespaces failed";
|
|
|
|
}
|
|
|
|
|
2020-04-28 22:55:19 +02:00
|
|
|
InitializeSubcontext();
|
2017-09-13 00:58:47 +02:00
|
|
|
|
2017-04-20 00:31:58 +02:00
|
|
|
ActionManager& am = ActionManager::GetInstance();
|
2017-07-28 01:20:58 +02:00
|
|
|
ServiceList& sm = ServiceList::GetInstance();
|
2017-04-20 00:31:58 +02:00
|
|
|
|
2017-07-27 21:54:48 +02:00
|
|
|
LoadBootScripts(am, sm);
|
2008-10-21 16:00:00 +02:00
|
|
|
|
2017-03-13 20:24:49 +01:00
|
|
|
// Turning this on and letting the INFO logging be discarded adds 0.2s to
|
|
|
|
// Nexus 9 boot time, so it's disabled by default.
|
2017-04-20 00:31:58 +02:00
|
|
|
if (false) DumpState();
|
2015-07-24 02:53:11 +02:00
|
|
|
|
2019-02-14 21:46:13 +01:00
|
|
|
// Make the GSI status available before scripts start running.
|
2020-02-25 09:31:10 +01:00
|
|
|
auto is_running = android::gsi::IsGsiRunning() ? "1" : "0";
|
|
|
|
SetProperty(gsi::kGsiBootedProp, is_running);
|
|
|
|
auto is_installed = android::gsi::IsGsiInstalled() ? "1" : "0";
|
|
|
|
SetProperty(gsi::kGsiInstalledProp, is_installed);
|
2023-02-18 08:16:17 +01:00
|
|
|
if (android::gsi::IsGsiRunning()) {
|
|
|
|
std::string dsu_slot;
|
|
|
|
if (android::gsi::GetActiveDsu(&dsu_slot)) {
|
|
|
|
SetProperty(gsi::kDsuSlotProp, dsu_slot);
|
|
|
|
}
|
|
|
|
}
|
2019-02-14 21:46:13 +01:00
|
|
|
|
2018-12-21 20:41:50 +01:00
|
|
|
am.QueueBuiltinAction(SetupCgroupsAction, "SetupCgroups");
|
2019-07-30 13:11:20 +02:00
|
|
|
am.QueueBuiltinAction(SetKptrRestrictAction, "SetKptrRestrict");
|
init: add builtin check for perf_event LSM hooks
Historically, the syscall was controlled by a system-wide
perf_event_paranoid sysctl, which is not flexible enough to allow only
specific processes to use the syscall. However, SELinux support for the
syscall has been upstreamed recently[1] (and is being backported to
Android R release common kernels).
[1] https://github.com/torvalds/linux/commit/da97e18458fb42d7c00fac5fd1c56a3896ec666e
As the presence of these hooks is not guaranteed on all Android R
platforms (since we support upgrades while keeping an older kernel), we
need to test for the feature dynamically. The LSM hooks themselves have
no way of being detected directly, so we instead test for their effects,
so we perform several syscalls, and look for a specific success/failure
combination, corresponding to the platform's SELinux policy.
If hooks are detected, perf_event_paranoid is set to -1 (unrestricted),
as the SELinux policy is then sufficient to control access.
This is done within init for several reasons:
* CAP_SYS_ADMIN side-steps perf_event_paranoid, so the tests can be done
if non-root users aren't allowed to use the syscall (the default).
* init is already the setter of the paranoid value (see init.rc), which
is also a privileged operation.
* the test itself is simple (couple of syscalls), so having a dedicated
test binary/domain felt excessive.
I decided to go through a new sysprop (set by a builtin test in
second-stage init), and keeping the actuation in init.rc. We can change
it to an immediate write to the paranoid value if a use-case comes up
that requires the decision to be made earlier in the init sequence.
Bug: 137092007
Change-Id: Ib13a31fee896f17a28910d993df57168a83a4b3d
2020-01-14 23:02:53 +01:00
|
|
|
am.QueueBuiltinAction(TestPerfEventSelinuxAction, "TestPerfEventSelinux");
|
2021-07-22 06:53:28 +02:00
|
|
|
am.QueueBuiltinAction(ConnectEarlyStageSnapuserdAction, "ConnectEarlyStageSnapuserd");
|
2015-07-24 02:53:11 +02:00
|
|
|
am.QueueEventTrigger("early-init");
|
2008-10-21 16:00:00 +02:00
|
|
|
|
2015-04-25 03:50:30 +02:00
|
|
|
// Queue an action that waits for coldboot done so we know ueventd has set up all of /dev...
|
2015-07-24 02:53:11 +02:00
|
|
|
am.QueueBuiltinAction(wait_for_coldboot_done_action, "wait_for_coldboot_done");
|
2015-04-25 03:50:30 +02:00
|
|
|
// ... so that we can start queuing up actions that require stuff from /dev.
|
2017-08-10 21:22:44 +02:00
|
|
|
am.QueueBuiltinAction(SetMmapRndBitsAction, "SetMmapRndBits");
|
2018-05-19 00:25:15 +02:00
|
|
|
Keychords keychords;
|
2015-10-25 01:20:18 +02:00
|
|
|
am.QueueBuiltinAction(
|
2019-06-10 20:08:01 +02:00
|
|
|
[&epoll, &keychords](const BuiltinArguments& args) -> Result<void> {
|
|
|
|
for (const auto& svc : ServiceList::GetInstance()) {
|
|
|
|
keychords.Register(svc->keycodes());
|
|
|
|
}
|
|
|
|
keychords.Start(&epoll, HandleKeychord);
|
|
|
|
return {};
|
|
|
|
},
|
|
|
|
"KeychordInit");
|
2008-10-21 16:00:00 +02:00
|
|
|
|
2015-03-28 07:20:44 +01:00
|
|
|
// Trigger all the boot actions to get us started.
|
2015-07-24 02:53:11 +02:00
|
|
|
am.QueueEventTrigger("init");
|
2011-08-25 00:28:23 +02:00
|
|
|
|
2015-02-07 05:15:18 +01:00
|
|
|
// Don't mount filesystems or start core system services in charger mode.
|
2017-03-29 01:40:41 +02:00
|
|
|
std::string bootmode = GetProperty("ro.bootmode", "");
|
2015-07-24 19:11:05 +02:00
|
|
|
if (bootmode == "charger") {
|
2015-07-24 02:53:11 +02:00
|
|
|
am.QueueEventTrigger("charger");
|
2011-08-25 00:28:23 +02:00
|
|
|
} else {
|
2015-07-24 02:53:11 +02:00
|
|
|
am.QueueEventTrigger("late-init");
|
2011-08-25 00:28:23 +02:00
|
|
|
}
|
2008-10-21 16:00:00 +02:00
|
|
|
|
2015-02-07 05:15:18 +01:00
|
|
|
// Run all property triggers based on current state of the properties.
|
2015-07-24 02:53:11 +02:00
|
|
|
am.QueueBuiltinAction(queue_property_triggers_action, "queue_property_triggers");
|
2010-04-14 04:52:01 +02:00
|
|
|
|
2020-07-07 00:26:49 +02:00
|
|
|
// Restore priority before entering the main loop.
|
|
|
|
setpriority(PRIO_PROCESS, 0, 0);
|
2015-04-25 03:50:30 +02:00
|
|
|
while (true) {
|
2023-02-18 00:34:49 +01:00
|
|
|
// By default, sleep until something happens. Do not convert far_future into
|
|
|
|
// std::chrono::milliseconds because that would trigger an overflow. The unit of boot_clock
|
|
|
|
// is 1ns.
|
|
|
|
const boot_clock::time_point far_future = boot_clock::time_point::max();
|
|
|
|
boot_clock::time_point next_action_time = far_future;
|
2017-03-24 00:54:38 +01:00
|
|
|
|
2020-03-12 22:29:25 +01:00
|
|
|
auto shutdown_command = shutdown_state.CheckShutdown();
|
|
|
|
if (shutdown_command) {
|
2020-03-25 02:00:23 +01:00
|
|
|
LOG(INFO) << "Got shutdown_command '" << *shutdown_command
|
|
|
|
<< "' Calling HandlePowerctlMessage()";
|
2020-03-12 22:29:25 +01:00
|
|
|
HandlePowerctlMessage(*shutdown_command);
|
init: fix crash when reboot is triggered by a builtin
Builtin commands may set the sys.powerctl property, which causes
reboot to be immediately processed. Unfortunately, part of the reboot
processing involves clearing the action queue, so when this scenario
happens, ActionManager::ExecuteOneCommand() can abort due to its state
being unexpectedly changed.
Longer term, the real fix here is to split init and property service.
In this case, the property sets will be sent to property service and
the reboot will only be processed once property service responds back
to init that the property has been set. Since that will not happen
within the action queue, there will be no risk of failure.
Short term, this change sets a flag in init to shutdown the device
before the next action is run, which defers the shutdown enough to fix
the crash, but continues to prevent any further commands from running.
Bug: 65374456
Test: force bullhead into the repro case and observe that it no longer
repros
Change-Id: I89c73dad8d7912a845d694b095cab061b8dcc05e
2017-09-13 23:39:45 +02:00
|
|
|
}
|
|
|
|
|
2020-03-12 22:29:25 +01:00
|
|
|
if (!(prop_waiter_state.MightBeWaiting() || Service::is_exec_service_running())) {
|
2015-07-24 02:53:11 +02:00
|
|
|
am.ExecuteOneCommand();
|
2023-02-18 00:34:49 +01:00
|
|
|
// If there's more work to do, wake up again immediately.
|
|
|
|
if (am.HasMoreCommands()) {
|
|
|
|
next_action_time = boot_clock::now();
|
|
|
|
}
|
2015-02-07 05:15:18 +01:00
|
|
|
}
|
2023-02-18 00:34:49 +01:00
|
|
|
// Since the above code examined pending actions, no new actions may be
|
|
|
|
// queued by the code between this line and the Epoll::Wait() call below
|
|
|
|
// without calling WakeMainInitThread().
|
2020-01-31 17:33:36 +01:00
|
|
|
if (!IsShuttingDown()) {
|
|
|
|
auto next_process_action_time = HandleProcessActions();
|
|
|
|
|
|
|
|
// If there's a process that needs restarting, wake up in time for that.
|
|
|
|
if (next_process_action_time) {
|
2023-02-18 00:34:49 +01:00
|
|
|
next_action_time = std::min(next_action_time, *next_process_action_time);
|
2017-03-24 00:54:38 +01:00
|
|
|
}
|
2020-01-31 17:33:36 +01:00
|
|
|
}
|
2016-11-11 02:43:47 +01:00
|
|
|
|
2023-02-18 00:34:49 +01:00
|
|
|
std::optional<std::chrono::milliseconds> epoll_timeout;
|
|
|
|
if (next_action_time != far_future) {
|
|
|
|
epoll_timeout = std::chrono::ceil<std::chrono::milliseconds>(
|
|
|
|
std::max(next_action_time - boot_clock::now(), 0ns));
|
2015-02-04 23:46:36 +01:00
|
|
|
}
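// std::chrono::ceil is used (rather than a truncating cast) so the computed timeout is never
// rounded down: rounding down could wake init slightly before next_action_time and cost an
// extra trip through the loop.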
|
2022-10-14 18:13:19 +02:00
|
|
|
auto epoll_result = epoll.Wait(epoll_timeout);
|
|
|
|
if (!epoll_result.ok()) {
|
|
|
|
LOG(ERROR) << epoll_result.error();
|
2008-10-21 16:00:00 +02:00
|
|
|
}
|
2020-03-27 22:08:20 +01:00
|
|
|
if (!IsShuttingDown()) {
|
|
|
|
HandleControlMessages();
|
2020-05-13 03:13:14 +02:00
|
|
|
SetUsbController();
|
2020-03-27 22:08:20 +01:00
|
|
|
}
|
2008-10-21 16:00:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2017-06-22 21:53:17 +02:00
|
|
|
|
|
|
|
} // namespace init
|
|
|
|
} // namespace android
|