bpfLoader: automatically bump ringbuffer size up to page size

A 4kB BPF_MAP_TYPE_RINGBUF does not work on a kernel with a 16kB page size,
as an eBPF ring buffer's size *must* be a multiple of the page size.

However, it is inefficient to force a 16kB RINGBUF on kernels
with only a 4kB page size.

It wastes 12kB of mlock'ed memory per ring buffer,
which isn't much, but it will add up as we add more ring buffers.
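
Rounding a requested size up to the runtime page size (rather than
hardcoding 16kB) is all that is needed. A minimal sketch of the rule,
with an illustrative helper name, not the bpfLoader code itself:

  #include <unistd.h>

  // Bump a requested BPF_MAP_TYPE_RINGBUF size up to the kernel's runtime
  // page size, so a 4kB request becomes 16kB only on 16kB-page kernels.
  static unsigned int ringbufSize(unsigned int requested) {
      const unsigned int pageSize = static_cast<unsigned int>(getpagesize());
      return (requested < pageSize) ? pageSize : requested;
  }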

The userspace ring buffer code doesn't mind the automatic
increase in size: it retrieves the real size from the map fd
(i.e. from the kernel) during init and thus adjusts automatically.
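
For reference, this is roughly how userspace can ask the kernel for a
map's real parameters via BPF_OBJ_GET_INFO_BY_FD; the helper below is a
hand-rolled sketch using the raw bpf() syscall, not the actual ring
buffer code, which goes through its own wrappers:

  #include <cstdint>
  #include <cstring>
  #include <linux/bpf.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  // Ask the kernel for the map's real parameters; max_entries is the ring
  // buffer size actually allocated (possibly bumped up to the page size).
  static int getRingbufSize(int mapFd, unsigned int* sizeOut) {
      bpf_map_info info;
      memset(&info, 0, sizeof(info));
      union bpf_attr attr;
      memset(&attr, 0, sizeof(attr));
      attr.info.bpf_fd = mapFd;
      attr.info.info_len = sizeof(info);
      attr.info.info = reinterpret_cast<uintptr_t>(&info);
      if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr))) return -1;
      *sizeOut = info.max_entries;
      return 0;
  }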

Test: TreeHugger
Bug: 279819914
Signed-off-by: Maciej Żenczykowski <maze@google.com>
Change-Id: I9fcb1f9dc2e86038a7f1a486f4efbc28dba38ca0
Maciej Żenczykowski 2023-06-16 08:18:49 +00:00
parent c93af9eec8
commit 8a117a374b

@@ -31,13 +31,13 @@
#include <sys/wait.h>
#include <unistd.h>
// This is BpfLoader v0.39
// This is BpfLoader v0.40
// WARNING: If you ever hit cherrypick conflicts here you're doing it wrong:
// You are NOT allowed to cherrypick bpfloader related patches out of order.
// (indeed: cherrypicking is probably a bad idea and you should merge instead)
// Mainline supports ONLY the published versions of the bpfloader for each Android release.
#define BPFLOADER_VERSION_MAJOR 0u
#define BPFLOADER_VERSION_MINOR 39u
#define BPFLOADER_VERSION_MINOR 40u
#define BPFLOADER_VERSION ((BPFLOADER_VERSION_MAJOR << 16) | BPFLOADER_VERSION_MINOR)
#include "BpfSyscallWrappers.h"
@@ -95,6 +95,8 @@ const std::string& getBuildType() {
return t;
}
static unsigned int page_size = static_cast<unsigned int>(getpagesize());
constexpr const char* lookupSelinuxContext(const domain d, const char* const unspecified = "") {
switch (d) {
case domain::unspecified: return unspecified;
@@ -705,6 +707,11 @@ static bool mapMatchesExpectations(const unique_fd& fd, const string& mapName,
if (type == BPF_MAP_TYPE_DEVMAP || type == BPF_MAP_TYPE_DEVMAP_HASH)
desired_map_flags |= BPF_F_RDONLY_PROG;
unsigned int desired_max_entries = mapDef.max_entries;
if (type == BPF_MAP_TYPE_RINGBUF) {
if (desired_max_entries < page_size) desired_max_entries = page_size;
}
// The following checks should *never* trigger, if one of them somehow does,
// it probably means a bpf .o file has been changed/replaced at runtime
// and bpfloader was manually rerun (normally it should only run *once*
@@ -715,7 +722,7 @@ static bool mapMatchesExpectations(const unique_fd& fd, const string& mapName,
if ((fd_type == type) &&
(fd_key_size == (int)mapDef.key_size) &&
(fd_value_size == (int)mapDef.value_size) &&
(fd_max_entries == (int)mapDef.max_entries) &&
(fd_max_entries == (int)desired_max_entries) &&
(fd_map_flags == desired_map_flags)) {
return true;
}
@@ -842,6 +849,11 @@ static int createMaps(const char* elfPath, ifstream& elfFile, vector<unique_fd>&
type = BPF_MAP_TYPE_HASH;
}
unsigned int max_entries = md[i].max_entries;
if (type == BPF_MAP_TYPE_RINGBUF) {
if (max_entries < page_size) max_entries = page_size;
}
domain selinux_context = getDomainFromSelinuxContext(md[i].selinux_context);
if (specified(selinux_context)) {
if (!inDomainBitmask(selinux_context, allowedDomainBitmask)) {
@@ -887,7 +899,7 @@ static int createMaps(const char* elfPath, ifstream& elfFile, vector<unique_fd>&
.map_flags = md[i].map_flags,
.key_size = md[i].key_size,
.value_size = md[i].value_size,
.max_entries = md[i].max_entries,
.max_entries = max_entries,
};
if (btfFd.has_value() && btfTypeIdMap.find(mapNames[i]) != btfTypeIdMap.end()) {
attr.btf_fd = btfFd->get();