diff --git a/bpfloader/Android.bp b/bpfloader/Android.bp new file mode 100644 index 0000000..fe52bcf --- /dev/null +++ b/bpfloader/Android.bp @@ -0,0 +1,58 @@ +// +// Copyright (C) 2018 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// +// bpfLoad binary +// +cc_binary { + name: "bpfloader", + + defaults: ["netd_defaults"], + cflags: [ + "-Wall", + "-Werror", + "-Wthread-safety", + ], + sanitize: { + integer_overflow: true, + }, + clang: true, + shared_libs: [ + "libcutils", + "libbpf", + "libbase", + "liblog", + "libnetdutils", + ], + srcs: [ + "BpfLoader.cpp", + ], + + required: [ + "bpf_kern.o", + ], + +} + +bpf { + name: "bpf_kern.o", + srcs: ["bpf_kern.c"], + cflags: [ + "-Wall", + "-Werror", + ], + include_dirs: ["system/netd/libbpf/include"], +} diff --git a/bpfloader/BpfLoader.cpp b/bpfloader/BpfLoader.cpp new file mode 100644 index 0000000..c7b1332 --- /dev/null +++ b/bpfloader/BpfLoader.cpp @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef LOG_TAG
+#define LOG_TAG "bpfloader"
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include "bpf/BpfUtils.h"
+#include "bpf/bpf_shared.h"
+
+using android::base::unique_fd;
+using android::netdutils::Slice;
+
+#define BPF_PROG_PATH "/system/etc/bpf"
+#define BPF_PROG_SRC BPF_PROG_PATH "/bpf_kern.o"
+
+#define CLEANANDEXIT(ret, mapPatterns)                    \
+    do {                                                  \
+        for (int i = 0; i < mapPatterns.size(); i++) {    \
+            if (mapPatterns[i].fd > -1) {                 \
+                close(mapPatterns[i].fd);                 \
+            }                                             \
+        }                                                 \
+        return ret;                                       \
+    } while (0)
+
+using android::bpf::BpfMapInfo;
+using android::bpf::BpfProgInfo;
+
+int main() {
+    const std::vector<BpfMapInfo> mapPatterns = {
+        BpfMapInfo(COOKIE_TAG_MAP, COOKIE_TAG_MAP_PATH),
+        BpfMapInfo(UID_COUNTERSET_MAP, UID_COUNTERSET_MAP_PATH),
+        BpfMapInfo(APP_UID_STATS_MAP, APP_UID_STATS_MAP_PATH),
+        BpfMapInfo(UID_STATS_MAP, UID_STATS_MAP_PATH),
+        BpfMapInfo(TAG_STATS_MAP, TAG_STATS_MAP_PATH),
+        BpfMapInfo(IFACE_STATS_MAP, IFACE_STATS_MAP_PATH),
+        BpfMapInfo(CONFIGURATION_MAP, CONFIGURATION_MAP_PATH),
+        BpfMapInfo(UID_OWNER_MAP, UID_OWNER_MAP_PATH),
+    };
+    for (int i = 0; i < mapPatterns.size(); i++) {
+        if (mapPatterns[i].fd < 0) {
+            ALOGE("Retrieving map from %s failed: %d", mapPatterns[i].path.c_str(), mapPatterns[i].fd);
+            CLEANANDEXIT(-1, mapPatterns);
+        }
+    }
+    BpfProgInfo programs[] = {
+        {BPF_CGROUP_INET_EGRESS, BPF_EGRESS_PROG_PATH, BPF_CGROUP_EGRESS_PROG_NAME,
+         BPF_PROG_TYPE_CGROUP_SKB, unique_fd(-1)},
+        {BPF_CGROUP_INET_INGRESS, BPF_INGRESS_PROG_PATH, BPF_CGROUP_INGRESS_PROG_NAME,
+         BPF_PROG_TYPE_CGROUP_SKB, unique_fd(-1)},
+        {MAX_BPF_ATTACH_TYPE, XT_BPF_INGRESS_PROG_PATH, XT_BPF_INGRESS_PROG_NAME,
+         BPF_PROG_TYPE_SOCKET_FILTER, unique_fd(-1)},
+        {MAX_BPF_ATTACH_TYPE, XT_BPF_EGRESS_PROG_PATH, XT_BPF_EGRESS_PROG_NAME,
+         BPF_PROG_TYPE_SOCKET_FILTER, unique_fd(-1)},
+        {MAX_BPF_ATTACH_TYPE, XT_BPF_WHITELIST_PROG_PATH, XT_BPF_WHITELIST_PROG_NAME,
+         BPF_PROG_TYPE_SOCKET_FILTER, unique_fd(-1)},
+        {MAX_BPF_ATTACH_TYPE, XT_BPF_BLACKLIST_PROG_PATH, XT_BPF_BLACKLIST_PROG_NAME,
+         BPF_PROG_TYPE_SOCKET_FILTER, unique_fd(-1)}};
+    int ret = android::bpf::parseProgramsFromFile(BPF_PROG_SRC, programs, ARRAY_SIZE(programs),
+                                                  mapPatterns);
+    CLEANANDEXIT(ret, mapPatterns);
+}
diff --git a/bpfloader/bpf_kern.c b/bpfloader/bpf_kern.c
new file mode 100644
index 0000000..4fe8140
--- /dev/null
+++ b/bpfloader/bpf_kern.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include +#include "bpf_kern.h" + + +ELF_SEC(BPF_CGROUP_INGRESS_PROG_NAME) +int bpf_cgroup_ingress(struct __sk_buff* skb) { + return bpf_traffic_account(skb, BPF_INGRESS); +} + +ELF_SEC(BPF_CGROUP_EGRESS_PROG_NAME) +int bpf_cgroup_egress(struct __sk_buff* skb) { + return bpf_traffic_account(skb, BPF_EGRESS); +} + +ELF_SEC(XT_BPF_EGRESS_PROG_NAME) +int xt_bpf_egress_prog(struct __sk_buff* skb) { + uint32_t key = skb->ifindex; + bpf_update_stats(skb, IFACE_STATS_MAP, BPF_EGRESS, &key); + return BPF_MATCH; +} + +ELF_SEC(XT_BPF_INGRESS_PROG_NAME) +int xt_bpf_ingress_prog(struct __sk_buff* skb) { + uint32_t key = skb->ifindex; + bpf_update_stats(skb, IFACE_STATS_MAP, BPF_INGRESS, &key); + return BPF_MATCH; +} + +ELF_SEC(XT_BPF_WHITELIST_PROG_NAME) +int xt_bpf_whitelist_prog(struct __sk_buff* skb) { + uint32_t sock_uid = get_socket_uid(skb); + if (is_system_uid(sock_uid)) return BPF_MATCH; + uint8_t* whitelistMatch = find_map_entry(UID_OWNER_MAP, &sock_uid); + if (whitelistMatch) return *whitelistMatch & HAPPY_BOX_MATCH; + return BPF_NOMATCH; +} + +ELF_SEC(XT_BPF_BLACKLIST_PROG_NAME) +int xt_bpf_blacklist_prog(struct __sk_buff* skb) { + uint32_t sock_uid = get_socket_uid(skb); + uint8_t* blacklistMatch = find_map_entry(UID_OWNER_MAP, &sock_uid); + if (blacklistMatch) return *blacklistMatch & PENALTY_BOX_MATCH; + return BPF_NOMATCH; +} diff --git a/bpfloader/bpf_kern.h b/bpfloader/bpf_kern.h new file mode 100644 index 0000000..e56033d --- /dev/null +++ b/bpfloader/bpf_kern.h @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This h file together with bpf_kern.c is used for compiling the eBPF kernel + * program. To generate the bpf_kern.o file manually, use the clang prebuilt in + * this android tree to compile the files with --target=bpf options. 
For + * example, in system/netd/ directory, execute the following command: + * $: ANDROID_BASE_DIRECTORY/prebuilts/clang/host/linux-x86/clang-4691093/bin/clang \ + * -I ANDROID_BASE_DIRECTORY/bionic/libc/kernel/uapi/ \ + * -I ANDROID_BASE_DIRECTORY/system/netd/bpfloader/ \ + * -I ANDROID_BASE_DIRECTORY/bionic/libc/kernel/android/uapi/ \ + * -I ANDROID_BASE_DIRECTORY/bionic/libc/include \ + * -I ANDROID_BASE_DIRECTORY/system/netd/libbpf/include \ + * --target=bpf -O2 -c bpfloader/bpf_kern.c -o bpfloader/bpf_kern.o + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bpf/bpf_shared.h" + +#define ELF_SEC(NAME) __attribute__((section(NAME), used)) + +struct uid_tag { + uint32_t uid; + uint32_t tag; +}; + +struct stats_key { + uint32_t uid; + uint32_t tag; + uint32_t counterSet; + uint32_t ifaceIndex; +}; + +struct stats_value { + uint64_t rxPackets; + uint64_t rxBytes; + uint64_t txPackets; + uint64_t txBytes; +}; + +/* helper functions called from eBPF programs written in C */ +static void* (*find_map_entry)(uint64_t map, void* key) = (void*)BPF_FUNC_map_lookup_elem; +static int (*write_to_map_entry)(uint64_t map, void* key, void* value, + uint64_t flags) = (void*)BPF_FUNC_map_update_elem; +static int (*delete_map_entry)(uint64_t map, void* key) = (void*)BPF_FUNC_map_delete_elem; +static uint64_t (*get_socket_cookie)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_cookie; +static uint32_t (*get_socket_uid)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_uid; +static int (*bpf_skb_load_bytes)(struct __sk_buff* skb, int off, void* to, + int len) = (void*)BPF_FUNC_skb_load_bytes; + +// This is defined for cgroup bpf filter only. +#define BPF_PASS 1 +#define BPF_DROP 0 + +// This is used for xt_bpf program only. 
+#define BPF_NOMATCH 0 +#define BPF_MATCH 1 + +#define BPF_EGRESS 0 +#define BPF_INGRESS 1 + +#define IP_PROTO_OFF offsetof(struct iphdr, protocol) +#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr) +#define IPPROTO_IHL_OFF 0 +#define TCP_FLAG_OFF 13 +#define RST_OFFSET 2 + +static __always_inline int is_system_uid(uint32_t uid) { + return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID); +} + +static __always_inline inline void bpf_update_stats(struct __sk_buff* skb, uint64_t map, + int direction, void *key) { + struct stats_value* value; + value = find_map_entry(map, key); + if (!value) { + struct stats_value newValue = {}; + write_to_map_entry(map, key, &newValue, BPF_NOEXIST); + value = find_map_entry(map, key); + } + if (value) { + if (direction == BPF_EGRESS) { + __sync_fetch_and_add(&value->txPackets, 1); + __sync_fetch_and_add(&value->txBytes, skb->len); + } else if (direction == BPF_INGRESS) { + __sync_fetch_and_add(&value->rxPackets, 1); + __sync_fetch_and_add(&value->rxBytes, skb->len); + } + } +} + +static inline bool skip_owner_match(struct __sk_buff* skb) { + int offset = -1; + int ret = 0; + if (skb->protocol == ETH_P_IP) { + offset = IP_PROTO_OFF; + uint8_t proto, ihl; + uint16_t flag; + ret = bpf_skb_load_bytes(skb, offset, &proto, 1); + if (!ret) { + if (proto == IPPROTO_ESP) { + return true; + } else if (proto == IPPROTO_TCP) { + ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1); + ihl = ihl & 0x0F; + ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1); + if (ret == 0 && (flag >> RST_OFFSET & 1)) { + return true; + } + } + } + } else if (skb->protocol == ETH_P_IPV6) { + offset = IPV6_PROTO_OFF; + uint8_t proto; + ret = bpf_skb_load_bytes(skb, offset, &proto, 1); + if (!ret) { + if (proto == IPPROTO_ESP) { + return true; + } else if (proto == IPPROTO_TCP) { + uint16_t flag; + ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1); + if (ret == 0 && (flag >> RST_OFFSET & 1)) { + return true; + } + } + } + } + return false; +} + +static __always_inline BpfConfig getConfig() { + uint32_t mapSettingKey = CONFIGURATION_KEY; + BpfConfig* config = find_map_entry(CONFIGURATION_MAP, &mapSettingKey); + if (!config) { + // Couldn't read configuration entry. Assume everything is disabled. + return DEFAULT_CONFIG; + } + return *config; +} + +static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid) { + if (skip_owner_match(skb)) return BPF_PASS; + + if ((uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID)) return BPF_PASS; + + BpfConfig enabledRules = getConfig(); + if (!enabledRules) { + return BPF_PASS; + } + + uint8_t* uidEntry = find_map_entry(UID_OWNER_MAP, &uid); + uint8_t uidRules = uidEntry ? *uidEntry : 0; + if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) { + return BPF_DROP; + } + if ((enabledRules & STANDBY_MATCH) && (uidRules & STANDBY_MATCH)) { + return BPF_DROP; + } + if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) { + return BPF_DROP; + } + return BPF_PASS; +} + +static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) { + uint32_t sock_uid = get_socket_uid(skb); + int match = bpf_owner_match(skb, sock_uid); + if ((direction == BPF_EGRESS) && (match == BPF_DROP)) { + // If an outbound packet is going to be dropped, we do not count that + // traffic. 
+ return match; + } + + uint64_t cookie = get_socket_cookie(skb); + struct uid_tag* utag = find_map_entry(COOKIE_TAG_MAP, &cookie); + uint32_t uid, tag; + if (utag) { + uid = utag->uid; + tag = utag->tag; + } else { + uid = sock_uid; + tag = 0; + } + + struct stats_key key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex}; + + uint8_t* counterSet = find_map_entry(UID_COUNTERSET_MAP, &uid); + if (counterSet) key.counterSet = (uint32_t)*counterSet; + + if (tag) { + bpf_update_stats(skb, TAG_STATS_MAP, direction, &key); + } + + key.tag = 0; + bpf_update_stats(skb, UID_STATS_MAP, direction, &key); + bpf_update_stats(skb, APP_UID_STATS_MAP, direction, &uid); + return match; +} diff --git a/libbpf/BpfMapTest.cpp b/libbpf/BpfMapTest.cpp new file mode 100644 index 0000000..e367bdb --- /dev/null +++ b/libbpf/BpfMapTest.cpp @@ -0,0 +1,276 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include "bpf/BpfMap.h" +#include "bpf/BpfNetworkStats.h" +#include "bpf/BpfUtils.h" + +using ::testing::_; +using ::testing::ByMove; +using ::testing::Invoke; +using ::testing::Return; +using ::testing::StrictMock; +using ::testing::Test; + +namespace android { +namespace bpf { + +using base::unique_fd; +using netdutils::StatusOr; + +constexpr uint32_t TEST_MAP_SIZE = 10; +constexpr uint32_t TEST_KEY1 = 1; +constexpr uint32_t TEST_VALUE1 = 10; +constexpr const char PINNED_MAP_PATH[] = "/sys/fs/bpf/testMap"; + +class BpfMapTest : public testing::Test { + protected: + BpfMapTest() {} + int mMapFd; + + void SetUp() { + if (!access(PINNED_MAP_PATH, R_OK)) { + EXPECT_EQ(0, remove(PINNED_MAP_PATH)); + } + mMapFd = createMap(BPF_MAP_TYPE_HASH, sizeof(uint32_t), sizeof(uint32_t), TEST_MAP_SIZE, + BPF_F_NO_PREALLOC); + } + + void TearDown() { + if (!access(PINNED_MAP_PATH, R_OK)) { + EXPECT_EQ(0, remove(PINNED_MAP_PATH)); + } + close(mMapFd); + } + + void checkMapInvalid(BpfMap& map) { + EXPECT_FALSE(map.isValid()); + EXPECT_EQ(-1, map.getMap().get()); + EXPECT_TRUE(map.getPinnedPath().empty()); + } + + void checkMapValid(BpfMap& map) { + EXPECT_LE(0, map.getMap().get()); + EXPECT_TRUE(map.isValid()); + } + + void writeToMapAndCheck(BpfMap& map, uint32_t key, uint32_t value) { + ASSERT_TRUE(isOk(map.writeValue(key, value, BPF_ANY))); + uint32_t value_read; + ASSERT_EQ(0, findMapEntry(map.getMap(), &key, &value_read)); + checkValueAndStatus(value, value_read); + } + + void checkValueAndStatus(uint32_t refValue, StatusOr value) { + ASSERT_TRUE(isOk(value.status())); + ASSERT_EQ(refValue, value.value()); + } + + void populateMap(uint32_t total, BpfMap& map) { + for (uint32_t key = 0; key < total; key++) { + uint32_t value = key * 10; + EXPECT_TRUE(isOk(map.writeValue(key, value, BPF_ANY))); + } + } + + void expectMapEmpty(BpfMap& map) { + auto isEmpty = 
map.isEmpty(); + ASSERT_TRUE(isOk(isEmpty)); + ASSERT_TRUE(isEmpty.value()); + } +}; + +TEST_F(BpfMapTest, constructor) { + BpfMap testMap1; + checkMapInvalid(testMap1); + + BpfMap testMap2(mMapFd); + checkMapValid(testMap2); + EXPECT_TRUE(testMap2.getPinnedPath().empty()); + + BpfMap testMap3(BPF_MAP_TYPE_HASH, TEST_MAP_SIZE, BPF_F_NO_PREALLOC); + checkMapValid(testMap3); + EXPECT_TRUE(testMap3.getPinnedPath().empty()); +} + +TEST_F(BpfMapTest, basicHelpers) { + BpfMap testMap(mMapFd); + uint32_t key = TEST_KEY1; + uint32_t value_write = TEST_VALUE1; + writeToMapAndCheck(testMap, key, value_write); + StatusOr value_read = testMap.readValue(key); + checkValueAndStatus(value_write, value_read); + StatusOr key_read = testMap.getFirstKey(); + checkValueAndStatus(key, key_read); + ASSERT_TRUE(isOk(testMap.deleteValue(key))); + ASSERT_GT(0, findMapEntry(testMap.getMap(), &key, &value_read)); + ASSERT_EQ(ENOENT, errno); +} + +TEST_F(BpfMapTest, reset) { + BpfMap testMap; + testMap.reset(mMapFd); + uint32_t key = TEST_KEY1; + uint32_t value_write = TEST_VALUE1; + writeToMapAndCheck(testMap, key, value_write); + testMap.reset(); + checkMapInvalid(testMap); + unique_fd invalidFd(mMapFd); + ASSERT_GT(0, findMapEntry(invalidFd, &key, &value_write)); + ASSERT_EQ(EBADF, errno); +} + +TEST_F(BpfMapTest, moveConstructor) { + BpfMap testMap1(mMapFd); + BpfMap testMap2; + testMap2 = std::move(testMap1); + uint32_t key = TEST_KEY1; + checkMapInvalid(testMap1); + uint32_t value = TEST_VALUE1; + writeToMapAndCheck(testMap2, key, value); +} + +TEST_F(BpfMapTest, pinnedToPath) { + BpfMap testMap1(mMapFd); + EXPECT_OK(testMap1.pinToPath(PINNED_MAP_PATH)); + EXPECT_EQ(0, access(PINNED_MAP_PATH, R_OK)); + EXPECT_EQ(0, testMap1.getPinnedPath().compare(PINNED_MAP_PATH)); + BpfMap testMap2(mapRetrieve(PINNED_MAP_PATH, 0)); + checkMapValid(testMap2); + uint32_t key = TEST_KEY1; + uint32_t value = TEST_VALUE1; + writeToMapAndCheck(testMap1, key, value); + StatusOr value_read = testMap2.readValue(key); + checkValueAndStatus(value, value_read); +} + +TEST_F(BpfMapTest, SetUpMap) { + BpfMap testMap1; + EXPECT_OK(testMap1.getOrCreate(TEST_MAP_SIZE, PINNED_MAP_PATH, BPF_MAP_TYPE_HASH)); + EXPECT_EQ(0, access(PINNED_MAP_PATH, R_OK)); + checkMapValid(testMap1); + EXPECT_EQ(0, testMap1.getPinnedPath().compare(PINNED_MAP_PATH)); + BpfMap testMap2; + EXPECT_OK(testMap2.getOrCreate(TEST_MAP_SIZE, PINNED_MAP_PATH, BPF_MAP_TYPE_HASH)); + checkMapValid(testMap2); + EXPECT_EQ(0, testMap2.getPinnedPath().compare(PINNED_MAP_PATH)); + uint32_t key = TEST_KEY1; + uint32_t value = TEST_VALUE1; + writeToMapAndCheck(testMap1, key, value); + StatusOr value_read = testMap2.readValue(key); + checkValueAndStatus(value, value_read); +} + +TEST_F(BpfMapTest, iterate) { + BpfMap testMap(mMapFd); + populateMap(TEST_MAP_SIZE, testMap); + int totalCount = 0; + int totalSum = 0; + const auto iterateWithDeletion = [&totalCount, &totalSum](const uint32_t& key, + BpfMap& map) { + EXPECT_GE((uint32_t)TEST_MAP_SIZE, key); + totalCount++; + totalSum += key; + return map.deleteValue(key); + }; + EXPECT_OK(testMap.iterate(iterateWithDeletion)); + EXPECT_EQ((int)TEST_MAP_SIZE, totalCount); + EXPECT_EQ(((1 + TEST_MAP_SIZE - 1) * (TEST_MAP_SIZE - 1)) / 2, (uint32_t)totalSum); + expectMapEmpty(testMap); +} + +TEST_F(BpfMapTest, iterateWithValue) { + BpfMap testMap(mMapFd); + populateMap(TEST_MAP_SIZE, testMap); + int totalCount = 0; + int totalSum = 0; + const auto iterateWithDeletion = [&totalCount, &totalSum](const uint32_t& key, + const uint32_t& value, + 
BpfMap& map) { + EXPECT_GE((uint32_t)TEST_MAP_SIZE, key); + EXPECT_EQ(value, key * 10); + totalCount++; + totalSum += value; + return map.deleteValue(key); + }; + EXPECT_OK(testMap.iterateWithValue(iterateWithDeletion)); + EXPECT_EQ((int)TEST_MAP_SIZE, totalCount); + EXPECT_EQ(((1 + TEST_MAP_SIZE - 1) * (TEST_MAP_SIZE - 1)) * 5, (uint32_t)totalSum); + expectMapEmpty(testMap); +} + +TEST_F(BpfMapTest, mapIsEmpty) { + BpfMap testMap(mMapFd); + expectMapEmpty(testMap); + uint32_t key = TEST_KEY1; + uint32_t value_write = TEST_VALUE1; + writeToMapAndCheck(testMap, key, value_write); + auto isEmpty = testMap.isEmpty(); + ASSERT_TRUE(isOk(isEmpty)); + ASSERT_FALSE(isEmpty.value()); + ASSERT_TRUE(isOk(testMap.deleteValue(key))); + ASSERT_GT(0, findMapEntry(testMap.getMap(), &key, &value_write)); + ASSERT_EQ(ENOENT, errno); + expectMapEmpty(testMap); + int entriesSeen = 0; + EXPECT_OK(testMap.iterate( + [&entriesSeen](const unsigned int&, + const BpfMap&) -> netdutils::Status { + entriesSeen++; + return netdutils::status::ok; + })); + EXPECT_EQ(0, entriesSeen); + EXPECT_OK(testMap.iterateWithValue( + [&entriesSeen](const unsigned int&, const unsigned int&, + const BpfMap&) -> netdutils::Status { + entriesSeen++; + return netdutils::status::ok; + })); + EXPECT_EQ(0, entriesSeen); +} + +TEST_F(BpfMapTest, mapClear) { + BpfMap testMap(mMapFd); + populateMap(TEST_MAP_SIZE, testMap); + auto isEmpty = testMap.isEmpty(); + ASSERT_TRUE(isOk(isEmpty)); + ASSERT_FALSE(isEmpty.value()); + ASSERT_TRUE(isOk(testMap.clear())); + expectMapEmpty(testMap); +} + +} // namespace bpf +} // namespace android diff --git a/libbpf/BpfUtils.cpp b/libbpf/BpfUtils.cpp new file mode 100644 index 0000000..955f2ec --- /dev/null +++ b/libbpf/BpfUtils.cpp @@ -0,0 +1,415 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "BpfUtils" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "bpf/BpfUtils.h" + +using android::base::GetUintProperty; +using android::base::StringPrintf; +using android::base::unique_fd; +using android::netdutils::MemBlock; +using android::netdutils::Slice; +using android::netdutils::statusFromErrno; +using android::netdutils::StatusOr; + +constexpr size_t LOG_BUF_SIZE = 65536; + +namespace android { +namespace bpf { + +/* The bpf_attr is a union which might have a much larger size then the struct we are using, while + * The inline initializer only reset the field we are using and leave the reset of the memory as + * is. The bpf kernel code will performs a much stricter check to ensure all unused field is 0. So + * this syscall will normally fail with E2BIG if we don't do a memset to bpf_attr. 
+ */ +bool operator==(const StatsKey& lhs, const StatsKey& rhs) { + return ((lhs.uid == rhs.uid) && (lhs.tag == rhs.tag) && (lhs.counterSet == rhs.counterSet) && + (lhs.ifaceIndex == rhs.ifaceIndex)); +} + +bool operator==(const UidTag& lhs, const UidTag& rhs) { + return ((lhs.uid == rhs.uid) && (lhs.tag == rhs.tag)); +} + +bool operator==(const StatsValue& lhs, const StatsValue& rhs) { + return ((lhs.rxBytes == rhs.rxBytes) && (lhs.txBytes == rhs.txBytes) && + (lhs.rxPackets == rhs.rxPackets) && (lhs.txPackets == rhs.txPackets)); +} + +int bpf(int cmd, Slice bpfAttr) { + return syscall(__NR_bpf, cmd, bpfAttr.base(), bpfAttr.size()); +} + +int createMap(bpf_map_type map_type, uint32_t key_size, uint32_t value_size, uint32_t max_entries, + uint32_t map_flags) { + bpf_attr attr; + memset(&attr, 0, sizeof(attr)); + attr.map_type = map_type; + attr.key_size = key_size; + attr.value_size = value_size; + attr.max_entries = max_entries; + attr.map_flags = map_flags; + + return bpf(BPF_MAP_CREATE, Slice(&attr, sizeof(attr))); +} + +int writeToMapEntry(const base::unique_fd& map_fd, void* key, void* value, uint64_t flags) { + bpf_attr attr; + memset(&attr, 0, sizeof(attr)); + attr.map_fd = map_fd.get(); + attr.key = ptr_to_u64(key); + attr.value = ptr_to_u64(value); + attr.flags = flags; + + return bpf(BPF_MAP_UPDATE_ELEM, Slice(&attr, sizeof(attr))); +} + +int findMapEntry(const base::unique_fd& map_fd, void* key, void* value) { + bpf_attr attr; + memset(&attr, 0, sizeof(attr)); + attr.map_fd = map_fd.get(); + attr.key = ptr_to_u64(key); + attr.value = ptr_to_u64(value); + + return bpf(BPF_MAP_LOOKUP_ELEM, Slice(&attr, sizeof(attr))); +} + +int deleteMapEntry(const base::unique_fd& map_fd, void* key) { + bpf_attr attr; + memset(&attr, 0, sizeof(attr)); + attr.map_fd = map_fd.get(); + attr.key = ptr_to_u64(key); + + return bpf(BPF_MAP_DELETE_ELEM, Slice(&attr, sizeof(attr))); +} + +int getNextMapKey(const base::unique_fd& map_fd, void* key, void* next_key) { + bpf_attr attr; + memset(&attr, 0, sizeof(attr)); + attr.map_fd = map_fd.get(); + attr.key = ptr_to_u64(key); + attr.next_key = ptr_to_u64(next_key); + + return bpf(BPF_MAP_GET_NEXT_KEY, Slice(&attr, sizeof(attr))); +} + +int getFirstMapKey(const base::unique_fd& map_fd, void* firstKey) { + bpf_attr attr; + memset(&attr, 0, sizeof(attr)); + attr.map_fd = map_fd.get(); + attr.key = 0; + attr.next_key = ptr_to_u64(firstKey); + + return bpf(BPF_MAP_GET_NEXT_KEY, Slice(&attr, sizeof(attr))); +} + +int bpfProgLoad(bpf_prog_type prog_type, Slice bpf_insns, const char* license, + uint32_t kern_version, Slice bpf_log) { + bpf_attr attr; + memset(&attr, 0, sizeof(attr)); + attr.prog_type = prog_type; + attr.insns = ptr_to_u64(bpf_insns.base()); + attr.insn_cnt = bpf_insns.size() / sizeof(struct bpf_insn); + attr.license = ptr_to_u64((void*)license); + attr.log_buf = ptr_to_u64(bpf_log.base()); + attr.log_size = bpf_log.size(); + attr.log_level = DEFAULT_LOG_LEVEL; + attr.kern_version = kern_version; + int ret = bpf(BPF_PROG_LOAD, Slice(&attr, sizeof(attr))); + + if (ret < 0) { + std::string prog_log = netdutils::toString(bpf_log); + std::istringstream iss(prog_log); + for (std::string line; std::getline(iss, line);) { + ALOGE("%s", line.c_str()); + } + } + return ret; +} + +int bpfFdPin(const base::unique_fd& map_fd, const char* pathname) { + bpf_attr attr; + memset(&attr, 0, sizeof(attr)); + attr.pathname = ptr_to_u64((void*)pathname); + attr.bpf_fd = map_fd.get(); + + return bpf(BPF_OBJ_PIN, Slice(&attr, sizeof(attr))); +} + +int mapRetrieve(const 
char* pathname, uint32_t flag) { + bpf_attr attr; + memset(&attr, 0, sizeof(attr)); + attr.pathname = ptr_to_u64((void*)pathname); + attr.file_flags = flag; + return bpf(BPF_OBJ_GET, Slice(&attr, sizeof(attr))); +} + +int attachProgram(bpf_attach_type type, uint32_t prog_fd, uint32_t cg_fd) { + bpf_attr attr; + memset(&attr, 0, sizeof(attr)); + attr.target_fd = cg_fd; + attr.attach_bpf_fd = prog_fd; + attr.attach_type = type; + + return bpf(BPF_PROG_ATTACH, Slice(&attr, sizeof(attr))); +} + +int detachProgram(bpf_attach_type type, uint32_t cg_fd) { + bpf_attr attr; + memset(&attr, 0, sizeof(attr)); + attr.target_fd = cg_fd; + attr.attach_type = type; + + return bpf(BPF_PROG_DETACH, Slice(&attr, sizeof(attr))); +} + +uint64_t getSocketCookie(int sockFd) { + uint64_t sock_cookie; + socklen_t cookie_len = sizeof(sock_cookie); + int res = getsockopt(sockFd, SOL_SOCKET, SO_COOKIE, &sock_cookie, &cookie_len); + if (res < 0) { + res = -errno; + ALOGE("Failed to get socket cookie: %s\n", strerror(errno)); + errno = -res; + // 0 is an invalid cookie. See sock_gen_cookie. + return NONEXISTENT_COOKIE; + } + return sock_cookie; +} + +bool hasBpfSupport() { + struct utsname buf; + int kernel_version_major; + int kernel_version_minor; + + uint64_t api_level = GetUintProperty("ro.product.first_api_level", 0); + if (api_level == 0) { + ALOGE("Cannot determine initial API level of the device"); + api_level = GetUintProperty("ro.build.version.sdk", 0); + } + + int ret = uname(&buf); + if (ret) { + return false; + } + char dummy; + ret = sscanf(buf.release, "%d.%d%c", &kernel_version_major, &kernel_version_minor, &dummy); + if (ret >= 2 && ((kernel_version_major > 4) || + (kernel_version_major == 4 && kernel_version_minor >= 9))) { + // Check if the device is shipped originally with android P. + return api_level >= MINIMUM_API_REQUIRED; + } + return false; +} + +int loadAndPinProgram(BpfProgInfo* prog, Slice progBlock) { + // Program doesn't exist. Try to load it. 
+ char bpf_log_buf[LOG_BUF_SIZE]; + Slice bpfLog = Slice(bpf_log_buf, sizeof(bpf_log_buf)); + prog->fd.reset(bpfProgLoad(prog->loadType, progBlock, "Apache 2.0", 0, bpfLog)); + if (prog->fd < 0) { + int ret = -errno; + ALOGE("load %s failed: %s", prog->name, strerror(errno)); + return ret; + } + if (prog->attachType == BPF_CGROUP_INET_EGRESS || prog->attachType == BPF_CGROUP_INET_INGRESS) { + unique_fd cg_fd(open(CGROUP_ROOT_PATH, O_DIRECTORY | O_RDONLY | O_CLOEXEC)); + if (cg_fd < 0) { + int ret = -errno; + ALOGE("Failed to open the cgroup directory"); + return ret; + } + int ret = android::bpf::attachProgram(prog->attachType, prog->fd, cg_fd); + if (ret) { + ret = -errno; + ALOGE("%s attach failed: %s", prog->name, strerror(errno)); + return ret; + } + } + if (prog->path) { + int ret = android::bpf::bpfFdPin(prog->fd, prog->path); + if (ret) { + ret = -errno; + ALOGE("Pin %s as file %s failed: %s", prog->name, prog->path, strerror(errno)); + return ret; + } + } + return 0; +} + +int extractAndLoadProg(BpfProgInfo* prog, Elf64_Shdr* sectionPtr, Slice fileContents, + const std::vector& mapPatterns) { + uint64_t progSize = (uint64_t) sectionPtr->sh_size; + Slice progSection = take(drop(fileContents, sectionPtr->sh_offset), progSize); + if (progSection.size() < progSize) { + ALOGE("programSection out of bound"); + return -EINVAL; + } + MemBlock progCopy(progSection); + if (progCopy.get().size() != progSize) { + ALOGE("program cannot be extracted"); + return -EINVAL; + } + Slice remaining = progCopy.get(); + while (remaining.size() >= MAP_CMD_SIZE) { + // Scan the program, examining all possible places that might be the start of a + // map load operation (i.e., all bytes of value MAP_LD_CMD_HEAD). + // In each of these places, check whether it is the start of one of the patterns + // we want to replace, and if so, replace it. + Slice mapHead = findFirstMatching(remaining, MAP_LD_CMD_HEAD); + if (mapHead.size() < MAP_CMD_SIZE) break; + bool replaced = false; + for (const auto& pattern : mapPatterns) { + if (!memcmp(mapHead.base(), pattern.search.data(), MAP_CMD_SIZE)) { + memcpy(mapHead.base(), pattern.replace.data(), MAP_CMD_SIZE); + replaced = true; + break; + } + } + remaining = drop(mapHead, replaced ? MAP_CMD_SIZE : sizeof(uint8_t)); + } + if (!(prog->path) || access(prog->path, R_OK) == -1) { + return loadAndPinProgram(prog, progCopy.get()); + } + return 0; +} + +int parsePrograms(Slice fileContents, BpfProgInfo* programs, size_t size, + const std::vector& mapPatterns) { + Slice elfHeader = take(fileContents, sizeof(Elf64_Ehdr)); + if (elfHeader.size() < sizeof(Elf64_Ehdr)) { + ALOGE("bpf fileContents does not have complete elf header"); + return -EINVAL; + } + + Elf64_Ehdr* elf = (Elf64_Ehdr*) elfHeader.base(); + // Find section names string table. This is the section whose index is e_shstrndx. 
+ if (elf->e_shstrndx == SHN_UNDEF) { + ALOGE("cannot locate namesSection\n"); + return -EINVAL; + } + size_t totalSectionSize = (elf->e_shnum) * sizeof(Elf64_Shdr); + Slice sections = take(drop(fileContents, elf->e_shoff), totalSectionSize); + if (sections.size() < totalSectionSize) { + ALOGE("sections corrupted"); + return -EMSGSIZE; + } + + Slice namesSection = + take(drop(sections, elf->e_shstrndx * sizeof(Elf64_Shdr)), sizeof(Elf64_Shdr)); + if (namesSection.size() != sizeof(Elf64_Shdr)) { + ALOGE("namesSection corrupted"); + return -EMSGSIZE; + } + size_t strTabOffset = ((Elf64_Shdr*) namesSection.base())->sh_offset; + size_t strTabSize = ((Elf64_Shdr*) namesSection.base())->sh_size; + + Slice strTab = take(drop(fileContents, strTabOffset), strTabSize); + if (strTab.size() < strTabSize) { + ALOGE("string table out of bound\n"); + return -EMSGSIZE; + } + + for (int i = 0; i < elf->e_shnum; i++) { + Slice section = take(drop(sections, i * sizeof(Elf64_Shdr)), sizeof(Elf64_Shdr)); + if (section.size() < sizeof(Elf64_Shdr)) { + ALOGE("section %d is out of bound, section size: %zu, header size: %zu, total size: " + "%zu", + i, section.size(), sizeof(Elf64_Shdr), sections.size()); + return -EBADF; + } + Elf64_Shdr* sectionPtr = (Elf64_Shdr*) section.base(); + Slice nameSlice = drop(strTab, sectionPtr->sh_name); + if (nameSlice.size() == 0) { + ALOGE("nameSlice out of bound, i: %d, strTabSize: %zu, sh_name: %u", i, strTabSize, + sectionPtr->sh_name); + return -EBADF; + } + for (size_t i = 0; i < size; i++) { + BpfProgInfo* prog = programs + i; + if (!strcmp((char*) nameSlice.base(), prog->name)) { + int ret = extractAndLoadProg(prog, sectionPtr, fileContents, mapPatterns); + if (ret) return ret; + } + } + } + + // Check all the program struct passed in to make sure they all have a valid fd. + for (size_t i = 0; i < size; i++) { + BpfProgInfo* prog = programs + i; + if (access(prog->path, R_OK) == -1) { + ALOGE("Load program %s failed", prog->name); + return -EINVAL; + } + } + return 0; +} + +int parseProgramsFromFile(const char* path, BpfProgInfo* programs, size_t size, + const std::vector& mapPatterns) { + unique_fd fd(open(path, O_RDONLY)); + int ret; + if (fd < 0) { + ret = -errno; + ALOGE("Failed to open %s program: %s", path, strerror(errno)); + return ret; + } + + struct stat stat; + if (fstat(fd.get(), &stat)) { + ret = -errno; + ALOGE("Failed to get file (%s) size: %s", path, strerror(errno)); + return ret; + } + + off_t fileLen = stat.st_size; + char* baseAddr = + (char*) mmap(NULL, fileLen, PROT_READ, MAP_PRIVATE | MAP_POPULATE, fd.get(), 0); + if (baseAddr == MAP_FAILED) { + ALOGE("Failed to map the program (%s) into memory: %s", path, strerror(errno)); + ret = -errno; + return ret; + } + + ret = parsePrograms(Slice(baseAddr, fileLen), programs, size, mapPatterns); + + munmap(baseAddr, fileLen); + return ret; +} + +} // namespace bpf +} // namespace android diff --git a/libbpf/include/bpf/BpfMap.h b/libbpf/include/bpf/BpfMap.h new file mode 100644 index 0000000..20db43e --- /dev/null +++ b/libbpf/include/bpf/BpfMap.h @@ -0,0 +1,284 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BPF_BPFMAP_H
+#define BPF_BPFMAP_H
+
+#include
+
+#include
+#include
+#include
+#include "bpf/BpfUtils.h"
+#include "netdutils/Status.h"
+#include "netdutils/StatusOr.h"
+
+namespace android {
+namespace bpf {
+
+// This is a class wrapper for eBPF maps. An eBPF map is a special in-kernel
+// data structure that stores data in <Key, Value> pairs. It can be read from
+// and written to by userspace through syscalls on the map file descriptor.
+// This class generalizes the procedure of interacting with eBPF maps and hides
+// the implementation details from other processes. Besides the basic syscall
+// wrappers, it also provides useful helper functions for iterating over the
+// map more easily.
+//
+// NOTE: A kernel eBPF map may be accessed by both kernel and userspace
+// processes at the same time. Also, if the map is pinned as a virtual file, it
+// can be obtained by multiple map class objects and accessed concurrently.
+// Though the map class object and the underlying kernel map are thread safe,
+// it is not safe to iterate over a map while another thread or process is
+// deleting from it. In that case the iteration can return duplicate entries.
+template <class Key, class Value>
+class BpfMap {
+  public:
+    BpfMap() : mMapFd(-1){};
+    BpfMap(int fd) : mMapFd(fd){};
+    BpfMap(bpf_map_type map_type, uint32_t max_entries, uint32_t map_flags) {
+        int map_fd = createMap(map_type, sizeof(Key), sizeof(Value), max_entries, map_flags);
+        if (map_fd < 0) {
+            mMapFd.reset(-1);
+        } else {
+            mMapFd.reset(map_fd);
+        }
+    }
+
+    netdutils::Status pinToPath(const std::string path) {
+        int ret = bpfFdPin(mMapFd, path.c_str());
+        if (ret) {
+            return netdutils::statusFromErrno(errno,
+                                              base::StringPrintf("pin to %s failed", path.c_str()));
+        }
+        mPinnedPath = path;
+        return netdutils::status::ok;
+    }
+
+    netdutils::StatusOr<Key> getFirstKey() const {
+        Key firstKey;
+        if (getFirstMapKey(mMapFd, &firstKey)) {
+            return netdutils::statusFromErrno(
+                errno, base::StringPrintf("Get firstKey map %d failed", mMapFd.get()));
+        }
+        return firstKey;
+    }
+
+    netdutils::StatusOr<Key> getNextKey(const Key& key) const {
+        Key nextKey;
+        if (getNextMapKey(mMapFd, const_cast<Key*>(&key), &nextKey)) {
+            return netdutils::statusFromErrno(
+                errno, base::StringPrintf("Get next key of map %d failed", mMapFd.get()));
+        }
+        return nextKey;
+    }
+
+    netdutils::Status writeValue(const Key& key, const Value& value, uint64_t flags) {
+        if (writeToMapEntry(mMapFd, const_cast<Key*>(&key), const_cast<Value*>(&value), flags)) {
+            return netdutils::statusFromErrno(
+                errno, base::StringPrintf("write to map %d failed", mMapFd.get()));
+        }
+        return netdutils::status::ok;
+    }
+
+    netdutils::StatusOr<Value> readValue(const Key key) const {
+        Value value;
+        if (findMapEntry(mMapFd, const_cast<Key*>(&key), &value)) {
+            return netdutils::statusFromErrno(
+                errno, base::StringPrintf("read value of map %d failed", mMapFd.get()));
+        }
+        return value;
+    }
+
+    netdutils::Status deleteValue(const Key& key) {
+        if (deleteMapEntry(mMapFd, const_cast<Key*>(&key))) {
+            return netdutils::statusFromErrno(
+                errno, base::StringPrintf("delete entry from map %d failed", mMapFd.get()));
+        }
+        return netdutils::status::ok;
+    }
+
+    // Tries to get the map from the pinned path; if the map does not exist
+    // yet, creates a new one and pins it to the path.
+    netdutils::Status getOrCreate(const uint32_t maxEntries, const char* path,
+                                  const bpf_map_type mapType);
+
+    // Iterate through the map and handle each key retrieved based on the filter
+    // without modifying the map content.
+    netdutils::Status iterate(
+        const std::function<netdutils::Status(const Key& key, const BpfMap<Key, Value>& map)>&
+            filter) const;
+
+    // Iterate through the map, get each <key, value> pair, and handle each pair
+    // based on the filter without modifying the map content.
+    netdutils::Status iterateWithValue(
+        const std::function<netdutils::Status(const Key& key, const Value& value,
+                                              const BpfMap<Key, Value>& map)>& filter) const;
+
+    // Iterate through the map and handle each key retrieved based on the filter.
+    netdutils::Status iterate(
+        const std::function<netdutils::Status(const Key& key, BpfMap<Key, Value>& map)>& filter);
+
+    // Iterate through the map, get each <key, value> pair, and handle each pair
+    // based on the filter.
+    netdutils::Status iterateWithValue(
+        const std::function<netdutils::Status(const Key& key, const Value& value,
+                                              BpfMap<Key, Value>& map)>& filter);
+
+    const base::unique_fd& getMap() const { return mMapFd; };
+
+    const std::string getPinnedPath() const { return mPinnedPath; };
+
+    // Move assignment operator.
+    void operator=(BpfMap&& other) noexcept {
+        mMapFd = std::move(other.mMapFd);
+        if (!other.mPinnedPath.empty()) {
+            mPinnedPath = other.mPinnedPath;
+        } else {
+            mPinnedPath.clear();
+        }
+        other.reset();
+    }
+
+    void reset(int fd = -1) {
+        mMapFd.reset(fd);
+        mPinnedPath.clear();
+    }
+
+    bool isValid() const { return mMapFd != -1; }
+
+    // It is only safe to call this method if it is guaranteed that nothing will
+    // concurrently iterate over the map in any process.
+    netdutils::Status clear() {
+        const auto deleteAllEntries = [](const Key& key, BpfMap& map) {
+            netdutils::Status res = map.deleteValue(key);
+            if (!isOk(res) && (res.code() != ENOENT)) {
+                ALOGE("Failed to delete data %s\n", strerror(res.code()));
+            }
+            return netdutils::status::ok;
+        };
+        RETURN_IF_NOT_OK(iterate(deleteAllEntries));
+        return netdutils::status::ok;
+    }
+
+    netdutils::StatusOr<bool> isEmpty() const {
+        auto key = this->getFirstKey();
+        // An error code of ENOENT means the map is empty.
+        if (!isOk(key) && key.status().code() == ENOENT) return true;
+        RETURN_IF_NOT_OK(key);
+        return false;
+    }
+
+  private:
+    base::unique_fd mMapFd;
+    std::string mPinnedPath;
+};
+
+template <class Key, class Value>
+netdutils::Status BpfMap<Key, Value>::getOrCreate(const uint32_t maxEntries, const char* path,
+                                                  bpf_map_type mapType) {
+    int ret = access(path, R_OK);
+    /* Check the pinned location first to see whether the map is already there;
+     * otherwise create a new one.
+ */ + if (ret == 0) { + mMapFd = base::unique_fd(mapRetrieve(path, 0)); + if (mMapFd == -1) { + reset(); + return netdutils::statusFromErrno( + errno, + base::StringPrintf("pinned map not accessible or does not exist: (%s)\n", path)); + } + mPinnedPath = path; + } else if (ret == -1 && errno == ENOENT) { + mMapFd = base::unique_fd( + createMap(mapType, sizeof(Key), sizeof(Value), maxEntries, BPF_F_NO_PREALLOC)); + if (mMapFd == -1) { + reset(); + return netdutils::statusFromErrno(errno, + base::StringPrintf("map create failed!: %s", path)); + } + netdutils::Status pinStatus = pinToPath(path); + if (!isOk(pinStatus)) { + reset(); + return pinStatus; + } + mPinnedPath = path; + } else { + return netdutils::statusFromErrno( + errno, base::StringPrintf("pinned map not accessible: %s", path)); + } + return netdutils::status::ok; +} + +template +netdutils::Status BpfMap::iterate( + const std::function& map)>& filter) + const { + netdutils::StatusOr curKey = getFirstKey(); + while (isOk(curKey)) { + const netdutils::StatusOr& nextKey = getNextKey(curKey.value()); + RETURN_IF_NOT_OK(filter(curKey.value(), *this)); + curKey = nextKey; + } + return curKey.status().code() == ENOENT ? netdutils::status::ok : curKey.status(); +} + +template +netdutils::Status BpfMap::iterateWithValue( + const std::function& map)>& filter) const { + netdutils::StatusOr curKey = getFirstKey(); + while (isOk(curKey)) { + const netdutils::StatusOr& nextKey = getNextKey(curKey.value()); + Value curValue; + ASSIGN_OR_RETURN(curValue, this->readValue(curKey.value())); + RETURN_IF_NOT_OK(filter(curKey.value(), curValue, *this)); + curKey = nextKey; + } + return curKey.status().code() == ENOENT ? netdutils::status::ok : curKey.status(); +} + +template +netdutils::Status BpfMap::iterate( + const std::function& map)>& filter) { + netdutils::StatusOr curKey = getFirstKey(); + while (isOk(curKey)) { + const netdutils::StatusOr& nextKey = getNextKey(curKey.value()); + RETURN_IF_NOT_OK(filter(curKey.value(), *this)); + curKey = nextKey; + } + return curKey.status().code() == ENOENT ? netdutils::status::ok : curKey.status(); +} + +template +netdutils::Status BpfMap::iterateWithValue( + const std::function& map)>& filter) { + netdutils::StatusOr curKey = getFirstKey(); + while (isOk(curKey)) { + const netdutils::StatusOr& nextKey = getNextKey(curKey.value()); + Value curValue; + ASSIGN_OR_RETURN(curValue, this->readValue(curKey.value())); + RETURN_IF_NOT_OK(filter(curKey.value(), curValue, *this)); + curKey = nextKey; + } + return curKey.status().code() == ENOENT ? netdutils::status::ok : curKey.status(); +} + +} // namespace bpf +} // namespace android + +#endif diff --git a/libbpf/include/bpf/BpfUtils.h b/libbpf/include/bpf/BpfUtils.h new file mode 100644 index 0000000..ed31758 --- /dev/null +++ b/libbpf/include/bpf/BpfUtils.h @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef BPF_BPFUTILS_H +#define BPF_BPFUTILS_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "android-base/unique_fd.h" +#include "netdutils/Slice.h" +#include "netdutils/StatusOr.h" + +#define BPF_PASS 1 +#define BPF_DROP 0 + +#define ptr_to_u64(x) ((uint64_t)(uintptr_t)(x)) +#define DEFAULT_LOG_LEVEL 1 + +#define MAP_LD_CMD_HEAD 0x18 +#define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a))) + +// The BPF instruction bytes that we need to replace. x is a placeholder (e.g., COOKIE_TAG_MAP). +#define BPF_MAP_SEARCH_PATTERN(x) \ + { \ + 0x18, 0x01, 0x00, 0x00, \ + (x)[0], (x)[1], (x)[2], (x)[3], \ + 0x00, 0x00, 0x00, 0x00, \ + (x)[4], (x)[5], (x)[6], (x)[7] \ + } + +// The bytes we'll replace them with. x is the actual fd number for the map at runtime. +// The second byte is changed from 0x01 to 0x11 since 0x11 is the special command used +// for bpf map fd loading. The original 0x01 is only a normal load command. +#define BPF_MAP_REPLACE_PATTERN(x) \ + { \ + 0x18, 0x11, 0x00, 0x00, \ + (x)[0], (x)[1], (x)[2], (x)[3], \ + 0x00, 0x00, 0x00, 0x00, \ + (x)[4], (x)[5], (x)[6], (x)[7] \ + } + +#define MAP_CMD_SIZE 16 + +namespace android { +namespace bpf { + +struct UidTag { + uint32_t uid; + uint32_t tag; +}; + +struct StatsKey { + uint32_t uid; + uint32_t tag; + uint32_t counterSet; + uint32_t ifaceIndex; +}; + +struct StatsValue { + uint64_t rxPackets; + uint64_t rxBytes; + uint64_t txPackets; + uint64_t txBytes; +}; + +struct Stats { + uint64_t rxBytes; + uint64_t rxPackets; + uint64_t txBytes; + uint64_t txPackets; + uint64_t tcpRxPackets; + uint64_t tcpTxPackets; +}; + +struct IfaceValue { + char name[IFNAMSIZ]; +}; + +struct BpfProgInfo { + bpf_attach_type attachType; + const char* path; + const char* name; + bpf_prog_type loadType; + base::unique_fd fd; +}; + +int mapRetrieve(const char* pathname, uint32_t flags); + +struct BpfMapInfo { + std::array search; + std::array replace; + const int fd; + std::string path; + + BpfMapInfo(uint64_t dummyFd, const char* mapPath) + : BpfMapInfo(dummyFd, android::bpf::mapRetrieve(mapPath, 0)) {} + + BpfMapInfo(uint64_t dummyFd, int realFd, const char* mapPath = "") : fd(realFd), path(mapPath) { + search = BPF_MAP_SEARCH_PATTERN((uint8_t*) &dummyFd); + replace = BPF_MAP_REPLACE_PATTERN((uint8_t*) &realFd); + } +}; + +#ifndef DEFAULT_OVERFLOWUID +#define DEFAULT_OVERFLOWUID 65534 +#endif + +constexpr const char* CGROUP_ROOT_PATH = "/dev/cg2_bpf"; + +constexpr const int OVERFLOW_COUNTERSET = 2; + +constexpr const uint64_t NONEXISTENT_COOKIE = 0; + +constexpr const int MINIMUM_API_REQUIRED = 28; + +int createMap(bpf_map_type map_type, uint32_t key_size, uint32_t value_size, + uint32_t max_entries, uint32_t map_flags); +int writeToMapEntry(const base::unique_fd& map_fd, void* key, void* value, uint64_t flags); +int findMapEntry(const base::unique_fd& map_fd, void* key, void* value); +int deleteMapEntry(const base::unique_fd& map_fd, void* key); +int getNextMapKey(const base::unique_fd& map_fd, void* key, void* next_key); +int getFirstMapKey(const base::unique_fd& map_fd, void* firstKey); +int bpfProgLoad(bpf_prog_type prog_type, netdutils::Slice bpf_insns, const char* license, + uint32_t kern_version, netdutils::Slice bpf_log); +int bpfFdPin(const base::unique_fd& map_fd, const char* pathname); +int attachProgram(bpf_attach_type type, uint32_t prog_fd, uint32_t cg_fd); +int detachProgram(bpf_attach_type type, uint32_t cg_fd); +uint64_t getSocketCookie(int sockFd); +bool hasBpfSupport(); +int 
parseProgramsFromFile(const char* path, BpfProgInfo* programs, size_t size, + const std::vector& mapPatterns); + +#define SKIP_IF_BPF_NOT_SUPPORTED \ + do { \ + if (!hasBpfSupport()) return; \ + } while (0) + +constexpr int BPF_CONTINUE = 0; +constexpr int BPF_DELETED = 1; + +bool operator==(const StatsValue& lhs, const StatsValue& rhs); +bool operator==(const UidTag& lhs, const UidTag& rhs); +bool operator==(const StatsKey& lhs, const StatsKey& rhs); + +} // namespace bpf +} // namespace android + +#endif
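
Reviewer note: the sketch below illustrates how the BpfMap wrapper added in libbpf/include/bpf/BpfMap.h is intended to be used, based only on the API introduced in this patch. It is not part of the change itself; the pin path "/sys/fs/bpf/exampleMap", the entry count, and the function name exampleUsage() are made up for illustration.

    #include "bpf/BpfMap.h"

    using android::bpf::BpfMap;
    using android::netdutils::isOk;
    using android::netdutils::Status;
    using android::netdutils::StatusOr;

    void exampleUsage() {
        // Reuse the map if it is already pinned at this (hypothetical) path, otherwise
        // create a BPF_MAP_TYPE_HASH map with up to 16 entries and pin it there.
        BpfMap<uint32_t, uint32_t> exampleMap;
        if (!isOk(exampleMap.getOrCreate(16, "/sys/fs/bpf/exampleMap", BPF_MAP_TYPE_HASH))) return;

        // Write one <key, value> pair and read it back.
        if (!isOk(exampleMap.writeValue(1, 100, BPF_ANY))) return;
        StatusOr<uint32_t> value = exampleMap.readValue(1);
        if (isOk(value)) {
            // value.value() is now 100.
        }

        // Walk every key without modifying the map; iteration stops at the first
        // callback that does not return netdutils::status::ok.
        exampleMap.iterate([](const uint32_t& key, const BpfMap<uint32_t, uint32_t>&) -> Status {
            (void) key;  // e.g. log or aggregate per-key statistics here
            return android::netdutils::status::ok;
        });
    }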