New bpfloader netd kernel program
Change the netd bpf program to the new format. Add the map definitions and the
necessary helper functions to the kernel program code. Move the netd bpf
programs out of bpfloader into a new directory.

Test: dumpsys netd trafficcontroller
Bug: 112334572
Change-Id: I9287285d188e966193532b1522b5d3e67e32e930
commit c1dd7648b1 (parent 18d8bfcfe9)
7 changed files with 139 additions and 72 deletions
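For context: in the old format the kernel program referred to each map through a per-map integer constant (COOKIE_TAG_MAP, UID_OWNER_MAP, ...) and the helper prototypes took uint64_t map handles; in the new format each map is declared as a struct bpf_map_def in the "maps" ELF section and the helpers take the map's address, which the loader resolves to a map fd at load time. A minimal sketch of the two helper conventions follows; it is illustrative only and the "_old" name is invented, not code introduced by this change.

#include <linux/bpf.h>   /* BPF_FUNC_map_lookup_elem */
#include <stdint.h>

/* Old format (removed in this change): helpers took a per-map integer constant.
 *     used as: find_map_entry_old(UID_OWNER_MAP, &uid);                       */
static void* (*find_map_entry_old)(uint64_t map, void* key) = (void*)BPF_FUNC_map_lookup_elem;

/* New format (added in this change): maps are symbols in the "maps" ELF section
 * and helpers take their address, which the loader rewrites into a map fd.
 *     used as: find_map_entry(&uid_owner_map, &uid);                          */
static void* (*find_map_entry)(void* map, void* key) = (void*)BPF_FUNC_map_lookup_elem;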
@@ -44,19 +44,9 @@ cc_binary {
     ],
 
     required: [
-        "bpf_kern.o",
+        "netd.o",
         // Uncomment once security related patches ready
         // "time_in_state.o",
     ],
 
 }
-
-bpf {
-    name: "bpf_kern.o",
-    srcs: ["bpf_kern.c"],
-    cflags: [
-        "-Wall",
-        "-Werror",
-    ],
-    include_dirs: ["system/netd/libnetdbpf/include"],
-}
@@ -53,7 +53,6 @@ using android::base::unique_fd;
 using std::string;
 
 #define BPF_PROG_PATH "/system/etc/bpf/"
-#define BPF_PROG_SRC BPF_PROG_PATH "bpf_kern.o"
 
 #define CLEANANDEXIT(ret, mapPatterns) \
     do { \
@@ -89,37 +88,4 @@ void loadAllElfObjects(void) {
 int main() {
     // Load all ELF objects, create programs and maps, and pin them
     loadAllElfObjects();
-
-    const std::vector<BpfMapInfo> mapPatterns = {
-        BpfMapInfo(COOKIE_TAG_MAP, COOKIE_TAG_MAP_PATH),
-        BpfMapInfo(UID_COUNTERSET_MAP, UID_COUNTERSET_MAP_PATH),
-        BpfMapInfo(APP_UID_STATS_MAP, APP_UID_STATS_MAP_PATH),
-        BpfMapInfo(UID_STATS_MAP, UID_STATS_MAP_PATH),
-        BpfMapInfo(TAG_STATS_MAP, TAG_STATS_MAP_PATH),
-        BpfMapInfo(IFACE_STATS_MAP, IFACE_STATS_MAP_PATH),
-        BpfMapInfo(CONFIGURATION_MAP, CONFIGURATION_MAP_PATH),
-        BpfMapInfo(UID_OWNER_MAP, UID_OWNER_MAP_PATH),
-    };
-    for (size_t i = 0; i < mapPatterns.size(); i++) {
-        if (mapPatterns[i].fd < 0) {
-            ALOGE("Rerieve Map from %s failed: %d", mapPatterns[i].path.c_str(), mapPatterns[i].fd);
-            CLEANANDEXIT(-1, mapPatterns);
-        }
-    }
-    BpfProgInfo programs[] = {
-        {BPF_CGROUP_INET_EGRESS, BPF_EGRESS_PROG_PATH, BPF_CGROUP_EGRESS_PROG_NAME,
-         BPF_PROG_TYPE_CGROUP_SKB, unique_fd(-1)},
-        {BPF_CGROUP_INET_INGRESS, BPF_INGRESS_PROG_PATH, BPF_CGROUP_INGRESS_PROG_NAME,
-         BPF_PROG_TYPE_CGROUP_SKB, unique_fd(-1)},
-        {MAX_BPF_ATTACH_TYPE, XT_BPF_INGRESS_PROG_PATH, XT_BPF_INGRESS_PROG_NAME,
-         BPF_PROG_TYPE_SOCKET_FILTER, unique_fd(-1)},
-        {MAX_BPF_ATTACH_TYPE, XT_BPF_EGRESS_PROG_PATH, XT_BPF_EGRESS_PROG_NAME,
-         BPF_PROG_TYPE_SOCKET_FILTER, unique_fd(-1)},
-        {MAX_BPF_ATTACH_TYPE, XT_BPF_WHITELIST_PROG_PATH, XT_BPF_WHITELIST_PROG_NAME,
-         BPF_PROG_TYPE_SOCKET_FILTER, unique_fd(-1)},
-        {MAX_BPF_ATTACH_TYPE, XT_BPF_BLACKLIST_PROG_PATH, XT_BPF_BLACKLIST_PROG_NAME,
-         BPF_PROG_TYPE_SOCKET_FILTER, unique_fd(-1)}};
-    int ret = android::bpf::parseProgramsFromFile(BPF_PROG_SRC, programs, ARRAY_SIZE(programs),
-                                                  mapPatterns);
-    CLEANANDEXIT(ret, mapPatterns);
 }
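With the per-program bookkeeping removed, main() only calls loadAllElfObjects(), which is expected to pick up every object installed under BPF_PROG_PATH (including the new netd.o). A hedged sketch of what such a directory scan can look like, assuming a simple "load every *.o" loop; this is not the actual bpfloader implementation.

#include <dirent.h>
#include <stdio.h>
#include <string.h>

#define BPF_PROG_PATH "/system/etc/bpf/"

/* Placeholder for the per-object load/create/pin step done by the loader. */
static void loadOneElfObject(const char* path) {
    printf("would load %s\n", path);
}

/* Assumed behaviour only: enumerate BPF_PROG_PATH and hand every .o file to
 * the per-object loader. */
static void loadAllElfObjectsSketch(void) {
    DIR* dir = opendir(BPF_PROG_PATH);
    if (!dir) return;
    struct dirent* entry;
    while ((entry = readdir(dir)) != NULL) {
        const char* name = entry->d_name;
        size_t len = strlen(name);
        if (len > 2 && strcmp(name + len - 2, ".o") == 0) {
            char path[512];
            snprintf(path, sizeof(path), "%s%s", BPF_PROG_PATH, name);
            loadOneElfObject(path);
        }
    }
    closedir(dir);
}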
@@ -167,7 +167,7 @@ int bpfFdPin(const base::unique_fd& map_fd, const char* pathname) {
     return bpf(BPF_OBJ_PIN, Slice(&attr, sizeof(attr)));
 }
 
-int mapRetrieve(const char* pathname, uint32_t flag) {
+int bpfFdGet(const char* pathname, uint32_t flag) {
     bpf_attr attr;
     memset(&attr, 0, sizeof(attr));
     attr.pathname = ptr_to_u64((void*)pathname);
@@ -175,6 +175,10 @@ int mapRetrieve(const char* pathname, uint32_t flag) {
     return bpf(BPF_OBJ_GET, Slice(&attr, sizeof(attr)));
 }
 
+int mapRetrieve(const char* pathname, uint32_t flag) {
+    return bpfFdGet(pathname, flag);
+}
+
 int attachProgram(bpf_attach_type type, uint32_t prog_fd, uint32_t cg_fd) {
     bpf_attr attr;
     memset(&attr, 0, sizeof(attr));
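bpfFdGet() is a thin wrapper around the BPF_OBJ_GET command, which opens a new file descriptor for an object pinned in bpffs; mapRetrieve() keeps the old name as an alias. A self-contained sketch of the underlying syscall; the bpf()/Slice wrappers from netdutils are not reproduced, and the pinned path in the usage note is a placeholder.

#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Minimal sketch of what bpfFdGet()/mapRetrieve() boil down to: BPF_OBJ_GET
 * via the raw bpf(2) syscall. Returns a new fd on success, -1 on error. */
static int bpf_obj_get_sketch(const char* pathname) {
    union bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.pathname = (uint64_t)(uintptr_t)pathname;
    return (int)syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}

/* usage (path is a placeholder): int fd = bpf_obj_get_sketch("/sys/fs/bpf/some_pinned_map"); */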
@@ -143,6 +143,7 @@ int getFirstMapKey(const base::unique_fd& map_fd, void* firstKey);
 int bpfProgLoad(bpf_prog_type prog_type, netdutils::Slice bpf_insns, const char* license,
                 uint32_t kern_version, netdutils::Slice bpf_log);
 int bpfFdPin(const base::unique_fd& map_fd, const char* pathname);
+int bpfFdGet(const char* pathname, uint32_t flags);
 int attachProgram(bpf_attach_type type, uint32_t prog_fd, uint32_t cg_fd);
 int detachProgram(bpf_attach_type type, uint32_t cg_fd);
 uint64_t getSocketCookie(int sockFd);
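The header now declares both the generic bpfFdGet() and attachProgram(), which binds a loaded cgroup-skb program to a cgroup fd. As a rough illustration of what such an attach helper reduces to at the syscall level (an assumed equivalence, not the BpfUtils implementation):

#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: attach a cgroup-skb program fd to a cgroup directory fd using
 * BPF_PROG_ATTACH, e.g. with type BPF_CGROUP_INET_INGRESS or _EGRESS. */
static int attach_cgroup_skb_sketch(int prog_fd, int cgroup_fd, enum bpf_attach_type type) {
    union bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.target_fd = (uint32_t)cgroup_fd;
    attr.attach_bpf_fd = (uint32_t)prog_fd;
    attr.attach_type = type;
    return (int)syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}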
progs/Android.bp (new file, 28 lines)
@@ -0,0 +1,28 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+//
+// bpf kernel programs
+//
+bpf {
+    name: "netd.o",
+    srcs: ["netd.c"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
+    include_dirs: ["system/netd/libnetdbpf/include"],
+}
@@ -14,47 +14,48 @@
  * limitations under the License.
  */
 
+#include "netd.h"
 #include <linux/bpf.h>
-#include "bpf_kern.h"
 
 
-ELF_SEC(BPF_CGROUP_INGRESS_PROG_NAME)
+SEC("cgroupskb/ingress/stats")
 int bpf_cgroup_ingress(struct __sk_buff* skb) {
     return bpf_traffic_account(skb, BPF_INGRESS);
 }
 
-ELF_SEC(BPF_CGROUP_EGRESS_PROG_NAME)
+SEC("cgroupskb/egress/stats")
 int bpf_cgroup_egress(struct __sk_buff* skb) {
     return bpf_traffic_account(skb, BPF_EGRESS);
 }
 
-ELF_SEC(XT_BPF_EGRESS_PROG_NAME)
+SEC("skfilter/egress/xtbpf")
 int xt_bpf_egress_prog(struct __sk_buff* skb) {
     uint32_t key = skb->ifindex;
-    bpf_update_stats(skb, IFACE_STATS_MAP, BPF_EGRESS, &key);
+    bpf_update_stats(skb, &iface_stats_map, BPF_EGRESS, &key);
     return BPF_MATCH;
 }
 
-ELF_SEC(XT_BPF_INGRESS_PROG_NAME)
+SEC("skfilter/ingress/xtbpf")
 int xt_bpf_ingress_prog(struct __sk_buff* skb) {
     uint32_t key = skb->ifindex;
-    bpf_update_stats(skb, IFACE_STATS_MAP, BPF_INGRESS, &key);
+    bpf_update_stats(skb, &iface_stats_map, BPF_INGRESS, &key);
     return BPF_MATCH;
 }
 
-ELF_SEC(XT_BPF_WHITELIST_PROG_NAME)
+SEC("skfilter/whitelist/xtbpf")
int xt_bpf_whitelist_prog(struct __sk_buff* skb) {
     uint32_t sock_uid = get_socket_uid(skb);
     if (is_system_uid(sock_uid)) return BPF_MATCH;
-    uint8_t* whitelistMatch = find_map_entry(UID_OWNER_MAP, &sock_uid);
+    uint8_t* whitelistMatch = find_map_entry(&uid_owner_map, &sock_uid);
     if (whitelistMatch) return *whitelistMatch & HAPPY_BOX_MATCH;
     return BPF_NOMATCH;
 }
 
-ELF_SEC(XT_BPF_BLACKLIST_PROG_NAME)
+SEC("skfilter/blacklist/xtbpf")
 int xt_bpf_blacklist_prog(struct __sk_buff* skb) {
     uint32_t sock_uid = get_socket_uid(skb);
-    uint8_t* blacklistMatch = find_map_entry(UID_OWNER_MAP, &sock_uid);
+    uint8_t* blacklistMatch = find_map_entry(&uid_owner_map, &sock_uid);
     if (blacklistMatch) return *blacklistMatch & PENALTY_BOX_MATCH;
     return BPF_NOMATCH;
 }
 
 char _license[] SEC("license") = "Apache 2.0";
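Each entry point above is a thin wrapper around bpf_traffic_account() or the map helpers, placed in a section whose name ("cgroupskb/...", "skfilter/...") tells the loader what program type to create and where to attach or pin it. For illustration only, a hypothetical extra program in the same style; the map name, section name and sizes are invented, nothing below is part of this change, and it assumes the SEC macro, helper pointers, bpf_map_def struct and BPF_NOMATCH constant from the header shown further down.

/* Illustrative only: a per-ifindex packet counter following the new-format
 * conventions of netd.c, not one of its programs. */
struct bpf_map_def SEC("maps") ifindex_count_map = {
    .type = BPF_MAP_TYPE_HASH,
    .key_size = sizeof(uint32_t),
    .value_size = sizeof(uint64_t),
    .max_entries = 64,
};

SEC("skfilter/example/count")
int xt_bpf_count_prog(struct __sk_buff* skb) {
    uint32_t key = skb->ifindex;
    uint64_t one = 1;
    uint64_t* count = find_map_entry(&ifindex_count_map, &key);
    if (count) {
        __sync_fetch_and_add(count, 1);  /* atomic in-place increment */
    } else {
        write_to_map_entry(&ifindex_count_map, &key, &one, BPF_NOEXIST);
    }
    return BPF_NOMATCH;  /* never match; this program only counts */
}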
@@ -15,7 +15,7 @@
  */
 
 /*
- * This h file together with bpf_kern.c is used for compiling the eBPF kernel
+ * This h file together with netd.c is used for compiling the eBPF kernel
  * program.
  */
 
@@ -30,7 +30,7 @@
 #include <stdint.h>
 #include "netdbpf/bpf_shared.h"
 
-#define ELF_SEC(NAME) __attribute__((section(NAME), used))
+#define SEC(NAME) __attribute__((section(NAME), used))
 
 struct uid_tag {
     uint32_t uid;
@@ -51,11 +51,15 @@ struct stats_value {
     uint64_t txBytes;
 };
 
+struct IfaceValue {
+    char name[IFNAMSIZ];
+};
+
 /* helper functions called from eBPF programs written in C */
-static void* (*find_map_entry)(uint64_t map, void* key) = (void*)BPF_FUNC_map_lookup_elem;
-static int (*write_to_map_entry)(uint64_t map, void* key, void* value,
+static void* (*find_map_entry)(void* map, void* key) = (void*)BPF_FUNC_map_lookup_elem;
+static int (*write_to_map_entry)(void* map, void* key, void* value,
                                  uint64_t flags) = (void*)BPF_FUNC_map_update_elem;
-static int (*delete_map_entry)(uint64_t map, void* key) = (void*)BPF_FUNC_map_delete_elem;
+static int (*delete_map_entry)(void* map, void* key) = (void*)BPF_FUNC_map_delete_elem;
 static uint64_t (*get_socket_cookie)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_cookie;
 static uint32_t (*get_socket_uid)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_uid;
 static int (*bpf_skb_load_bytes)(struct __sk_buff* skb, int off, void* to,
@@ -78,12 +82,85 @@ static int (*bpf_skb_load_bytes)(struct __sk_buff* skb, int off, void* to,
 #define TCP_FLAG_OFF 13
 #define RST_OFFSET 2
 
+/* loader usage */
+struct bpf_map_def {
+    unsigned int type;
+    unsigned int key_size;
+    unsigned int value_size;
+    unsigned int max_entries;
+    unsigned int map_flags;
+    unsigned int pad[2];
+};
+
+struct bpf_map_def SEC("maps") cookie_tag_map = {
+    .type = BPF_MAP_TYPE_HASH,
+    .key_size = sizeof(uint64_t),
+    .value_size = sizeof(struct uid_tag),
+    .max_entries = COOKIE_UID_MAP_SIZE,
+};
+
+struct bpf_map_def SEC("maps") uid_counterset_map = {
+    .type = BPF_MAP_TYPE_HASH,
+    .key_size = sizeof(uint32_t),
+    .value_size = sizeof(uint8_t),
+    .max_entries = UID_COUNTERSET_MAP_SIZE,
+};
+
+struct bpf_map_def SEC("maps") app_uid_stats_map = {
+    .type = BPF_MAP_TYPE_HASH,
+    .key_size = sizeof(uint32_t),
+    .value_size = sizeof(struct stats_value),
+    .max_entries = UID_STATS_MAP_SIZE,
+};
+
+struct bpf_map_def SEC("maps") uid_stats_map = {
+    .type = BPF_MAP_TYPE_HASH,
+    .key_size = sizeof(struct stats_key),
+    .value_size = sizeof(struct stats_value),
+    .max_entries = UID_STATS_MAP_SIZE,
+};
+
+struct bpf_map_def SEC("maps") tag_stats_map = {
+    .type = BPF_MAP_TYPE_HASH,
+    .key_size = sizeof(struct stats_key),
+    .value_size = sizeof(struct stats_value),
+    .max_entries = TAG_STATS_MAP_SIZE,
+};
+
+struct bpf_map_def SEC("maps") iface_stats_map = {
+    .type = BPF_MAP_TYPE_HASH,
+    .key_size = sizeof(uint32_t),
+    .value_size = sizeof(struct stats_value),
+    .max_entries = IFACE_STATS_MAP_SIZE,
+};
+
+struct bpf_map_def SEC("maps") configuration_map = {
+    .type = BPF_MAP_TYPE_HASH,
+    .key_size = sizeof(uint32_t),
+    .value_size = sizeof(uint8_t),
+    .max_entries = CONFIGURATION_MAP_SIZE,
+};
+
+struct bpf_map_def SEC("maps") uid_owner_map = {
+    .type = BPF_MAP_TYPE_HASH,
+    .key_size = sizeof(uint32_t),
+    .value_size = sizeof(uint8_t),
+    .max_entries = UID_OWNER_MAP_SIZE,
+};
+
+struct bpf_map_def SEC("maps") iface_index_name_map = {
+    .type = BPF_MAP_TYPE_HASH,
+    .key_size = sizeof(uint32_t),
+    .value_size = sizeof(struct IfaceValue),
+    .max_entries = IFACE_INDEX_NAME_MAP_SIZE,
+};
+
 static __always_inline int is_system_uid(uint32_t uid) {
     return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID);
 }
 
-static __always_inline inline void bpf_update_stats(struct __sk_buff* skb, uint64_t map,
-                                                    int direction, void *key) {
+static __always_inline inline void bpf_update_stats(struct __sk_buff* skb, struct bpf_map_def* map,
+                                                    int direction, void* key) {
     struct stats_value* value;
     value = find_map_entry(map, key);
     if (!value) {
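These bpf_map_def entries are descriptions for the loader: it reads them out of the "maps" ELF section and creates the actual kernel maps, which is roughly one BPF_MAP_CREATE call per entry. A hedged sketch of that step at the syscall level; the struct layout is copied from above and the function is not the bpfloader implementation.

#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Mirrors the loader-facing map description above (layout assumed, not taken
 * from the bpfloader sources). */
struct bpf_map_def_sketch {
    unsigned int type;
    unsigned int key_size;
    unsigned int value_size;
    unsigned int max_entries;
    unsigned int map_flags;
    unsigned int pad[2];
};

/* Create one kernel map from its description; returns the new map fd or -1. */
static int create_map_sketch(const struct bpf_map_def_sketch* def) {
    union bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.map_type = def->type;
    attr.key_size = def->key_size;
    attr.value_size = def->value_size;
    attr.max_entries = def->max_entries;
    attr.map_flags = def->map_flags;
    return (int)syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}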
@@ -143,7 +220,7 @@ static inline bool skip_owner_match(struct __sk_buff* skb) {
 
 static __always_inline BpfConfig getConfig() {
     uint32_t mapSettingKey = CONFIGURATION_KEY;
-    BpfConfig* config = find_map_entry(CONFIGURATION_MAP, &mapSettingKey);
+    BpfConfig* config = find_map_entry(&configuration_map, &mapSettingKey);
     if (!config) {
         // Couldn't read configuration entry. Assume everything is disabled.
         return DEFAULT_CONFIG;
@@ -161,7 +238,7 @@ static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid) {
         return BPF_PASS;
     }
 
-    uint8_t* uidEntry = find_map_entry(UID_OWNER_MAP, &uid);
+    uint8_t* uidEntry = find_map_entry(&uid_owner_map, &uid);
     uint8_t uidRules = uidEntry ? *uidEntry : 0;
     if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) {
         return BPF_DROP;
@@ -185,7 +262,7 @@ static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int
     }
 
     uint64_t cookie = get_socket_cookie(skb);
-    struct uid_tag* utag = find_map_entry(COOKIE_TAG_MAP, &cookie);
+    struct uid_tag* utag = find_map_entry(&cookie_tag_map, &cookie);
     uint32_t uid, tag;
     if (utag) {
         uid = utag->uid;
@@ -197,15 +274,15 @@ static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int
 
     struct stats_key key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};
 
-    uint8_t* counterSet = find_map_entry(UID_COUNTERSET_MAP, &uid);
+    uint8_t* counterSet = find_map_entry(&uid_counterset_map, &uid);
     if (counterSet) key.counterSet = (uint32_t)*counterSet;
 
     if (tag) {
-        bpf_update_stats(skb, TAG_STATS_MAP, direction, &key);
+        bpf_update_stats(skb, &tag_stats_map, direction, &key);
     }
 
     key.tag = 0;
-    bpf_update_stats(skb, UID_STATS_MAP, direction, &key);
-    bpf_update_stats(skb, APP_UID_STATS_MAP, direction, &uid);
+    bpf_update_stats(skb, &uid_stats_map, direction, &key);
+    bpf_update_stats(skb, &app_uid_stats_map, direction, &uid);
     return match;
 }
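On the userspace side, a dump such as "dumpsys netd trafficcontroller" walks these pinned maps. A rough, self-contained sketch of that pattern using the raw bpf(2) syscall; the pinned path is a placeholder and the key/value structs are simplified copies of the ones above, so treat this as illustrative rather than the netd implementation.

#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Simplified copies of the key/value structs above; padding/alignment of the
 * real netd structs may differ. */
struct stats_key_sketch { uint32_t uid, tag, counterSet, ifaceIndex; };
struct stats_value_sketch { uint64_t rxPackets, rxBytes, txPackets, txBytes; };

static int bpf_cmd(int cmd, union bpf_attr* attr) {
    return (int)syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

static void dump_uid_stats(const char* pinned_path) {
    union bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.pathname = (uint64_t)(uintptr_t)pinned_path;
    int fd = bpf_cmd(BPF_OBJ_GET, &attr);  /* open the pinned map */
    if (fd < 0) return;

    struct stats_key_sketch cur, next;
    struct stats_value_sketch value;
    void* prev = NULL;  /* a NULL key asks for the first entry on newer kernels */
    while (1) {
        memset(&attr, 0, sizeof(attr));
        attr.map_fd = (uint32_t)fd;
        attr.key = (uint64_t)(uintptr_t)prev;
        attr.next_key = (uint64_t)(uintptr_t)&next;
        if (bpf_cmd(BPF_MAP_GET_NEXT_KEY, &attr) < 0) break;  /* no more keys */

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = (uint32_t)fd;
        attr.key = (uint64_t)(uintptr_t)&next;
        attr.value = (uint64_t)(uintptr_t)&value;
        if (bpf_cmd(BPF_MAP_LOOKUP_ELEM, &attr) == 0) {
            printf("uid=%u rxBytes=%llu txBytes=%llu\n", next.uid,
                   (unsigned long long)value.rxBytes, (unsigned long long)value.txBytes);
        }
        cur = next;
        prev = &cur;
    }
    close(fd);
}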