diff --git a/libc/bionic/system_properties.cpp b/libc/bionic/system_properties.cpp
index 170e7ac97..aae99b12c 100644
--- a/libc/bionic/system_properties.cpp
+++ b/libc/bionic/system_properties.cpp
@@ -51,7 +51,6 @@
 #include <sys/_system_properties.h>
 #include <sys/system_properties.h>

-#include "private/bionic_atomic_inline.h"
 #include "private/bionic_futex.h"
 #include "private/bionic_macros.h"

@@ -80,22 +79,26 @@ struct prop_bt {
     uint8_t namelen;
     uint8_t reserved[3];

-    // TODO: The following fields should be declared as atomic_uint32_t.
-    // They should be assigned to with release semantics, instead of using
-    // explicit fences. Unfortunately, the read accesses are generally
-    // followed by more dependent read accesses, and the dependence
-    // is assumed to enforce memory ordering. Which it does on supported
-    // hardware. This technically should use memory_order_consume, if
-    // that worked as intended.
+    // The property trie is updated only by the init process (single threaded), which provides
+    // the property service, and it can be read by multiple threads at the same time. As the
+    // property trie is not protected by locks, we use atomic_uint_least32_t for the left,
+    // right, children "pointers" in the trie node. To make sure readers who see the change of
+    // a "pointer" also see the change of the prop_bt structure contents it points to, we
+    // always use a release-consume ordering pair when accessing these "pointers".
+
+    // prop "points" to the prop_info structure if there is a property associated with the
+    // trie node. Its situation is similar to that of the left, right, children "pointers",
+    // so we use atomic_uint_least32_t and release-consume ordering to protect it as well.
+
     // We should also avoid rereading these fields redundantly, since not
     // all processor implementations ensure that multiple loads from the
     // same field are carried out in the right order.
-    volatile uint32_t prop;
+    atomic_uint_least32_t prop;

-    volatile uint32_t left;
-    volatile uint32_t right;
+    atomic_uint_least32_t left;
+    atomic_uint_least32_t right;

-    volatile uint32_t children;
+    atomic_uint_least32_t children;

     char name[0];

@@ -103,8 +106,6 @@ struct prop_bt {
         this->namelen = name_length;
         memcpy(this->name, name, name_length);
         this->name[name_length] = '\0';
-        ANDROID_MEMBAR_FULL(); // TODO: Instead use a release store
-                               // for subsequent pointer assignment.
     }

 private:
@@ -143,8 +144,6 @@ struct prop_info {
         atomic_init(&this->serial, valuelen << 24);
         memcpy(this->value, value, valuelen);
         this->value[valuelen] = '\0';
-        ANDROID_MEMBAR_FULL(); // TODO: Instead use a release store
-                               // for subsequent point assignment.
     }
 private:
     DISALLOW_COPY_AND_ASSIGN(prop_info);
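
Not part of the patch, but for context: the ordering scheme the new comment describes reduces to one publication pattern. The writer fully initializes a node and then release-stores its offset; a reader consume-loads the offset and only then dereferences it, so the dependent reads see the initialized contents. A minimal standalone sketch in portable C++ (std::atomic stands in for the C11-style atomic_* calls used in the patch; data, left, publish and find are illustrative names, not bionic symbols):

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    // Stand-ins for the mmap'ed property area and one trie "pointer" field.
    static char data[4096];
    static std::atomic<std::uint_least32_t> left{0};  // 0 means "no child yet"

    // Writer (the single-threaded init process in the real code): construct
    // the node completely, then publish its offset with a release store.
    void publish(std::uint_least32_t off, const char* name, std::uint8_t namelen) {
        char* node = data + off;
        std::memcpy(node, name, namelen);
        node[namelen] = '\0';
        left.store(off, std::memory_order_release);
    }

    // Reader (any thread): the consume load pairs with the release store, so
    // reads that are data-dependent on 'off' see the writer's initialization.
    const char* find() {
        std::uint_least32_t off = left.load(std::memory_order_consume);
        return off != 0 ? data + off : nullptr;
    }

The intent of memory_order_consume is that, on hardware where dependent loads are naturally ordered (as the old TODO noted for supported hardware), the reader pays nothing beyond a plain load; compilers may conservatively strengthen consume to acquire, which is still correct.
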
@@ -291,10 +290,10 @@ static int map_prop_area()
     return map_result;
 }

-static void *allocate_obj(const size_t size, uint32_t *const off)
+static void *allocate_obj(const size_t size, uint_least32_t *const off)
 {
     prop_area *pa = __system_property_area__;
-    const size_t aligned = BIONIC_ALIGN(size, sizeof(uint32_t));
+    const size_t aligned = BIONIC_ALIGN(size, sizeof(uint_least32_t));
     if (pa->bytes_used + aligned > pa_data_size) {
         return NULL;
     }
@@ -304,12 +303,12 @@ static void *allocate_obj(const size_t size, uint32_t *const off)
     return pa->data + *off;
 }

-static prop_bt *new_prop_bt(const char *name, uint8_t namelen, uint32_t *const off)
+static prop_bt *new_prop_bt(const char *name, uint8_t namelen, uint_least32_t *const off)
 {
-    uint32_t new_offset;
-    void *const offset = allocate_obj(sizeof(prop_bt) + namelen + 1, &new_offset);
-    if (offset) {
-        prop_bt* bt = new(offset) prop_bt(name, namelen);
+    uint_least32_t new_offset;
+    void *const p = allocate_obj(sizeof(prop_bt) + namelen + 1, &new_offset);
+    if (p != NULL) {
+        prop_bt* bt = new(p) prop_bt(name, namelen);
         *off = new_offset;
         return bt;
     }
@@ -318,20 +317,20 @@ static prop_bt *new_prop_bt(const char *name, uint8_t namelen, uint32_t *const o
 }

 static prop_info *new_prop_info(const char *name, uint8_t namelen,
-        const char *value, uint8_t valuelen, uint32_t *const off)
+        const char *value, uint8_t valuelen, uint_least32_t *const off)
 {
-    uint32_t off_tmp;
-    void* const offset = allocate_obj(sizeof(prop_info) + namelen + 1, &off_tmp);
-    if (offset) {
-        prop_info* info = new(offset) prop_info(name, namelen, value, valuelen);
-        *off = off_tmp;
+    uint_least32_t new_offset;
+    void* const p = allocate_obj(sizeof(prop_info) + namelen + 1, &new_offset);
+    if (p != NULL) {
+        prop_info* info = new(p) prop_info(name, namelen, value, valuelen);
+        *off = new_offset;
         return info;
     }

     return NULL;
 }

-static void *to_prop_obj(const uint32_t off)
+static void *to_prop_obj(uint_least32_t off)
 {
     if (off > pa_data_size)
         return NULL;
@@ -341,7 +340,17 @@ static void *to_prop_obj(const uint32_t off)
     return (__system_property_area__->data + off);
 }

-static prop_bt *root_node()
+static inline prop_bt *to_prop_bt(atomic_uint_least32_t* off_p) {
+  uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume);
+  return reinterpret_cast<prop_bt*>(to_prop_obj(off));
+}
+
+static inline prop_info *to_prop_info(atomic_uint_least32_t* off_p) {
+  uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume);
+  return reinterpret_cast<prop_info*>(to_prop_obj(off));
+}
+
+static inline prop_bt *root_node()
 {
     return reinterpret_cast<prop_bt*>(to_prop_obj(0));
 }
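
As a standalone illustration (not part of the patch) of why these helpers traffic in offsets rather than pointers: the property area is a single mapped region shared across processes, so a 32-bit byte offset into it stays meaningful in every mapping, while a raw pointer would not. A rough sketch of the allocate-then-placement-new scheme under that assumption (Arena, Node and new_node are hypothetical names):

    #include <cstddef>
    #include <cstdint>
    #include <new>

    // A fixed-size arena standing in for prop_area: objects are addressed
    // by byte offset into 'data', mirroring allocate_obj/to_prop_obj above.
    struct Arena {
        std::uint_least32_t bytes_used = 0;
        char data[4096];

        // Carve out an aligned chunk and report the offset it landed at.
        void* allocate(std::size_t size, std::uint_least32_t* off) {
            std::size_t aligned = (size + 3) & ~std::size_t{3};  // 4-byte align
            if (bytes_used + aligned > sizeof(data)) return nullptr;
            *off = bytes_used;
            bytes_used += static_cast<std::uint_least32_t>(aligned);
            return data + *off;
        }

        void* resolve(std::uint_least32_t off) { return data + off; }
    };

    struct Node { int value; };

    // Placement new constructs the node inside arena storage; the offset,
    // not the pointer, is what later gets published into the shared trie.
    Node* new_node(Arena& a, int v, std::uint_least32_t* off) {
        void* p = a.allocate(sizeof(Node), off);
        return p != nullptr ? new (p) Node{v} : nullptr;
    }

Note also that new_prop_bt and new_prop_info write the offset out through *off only after construction succeeds, so callers never publish a half-made node.
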
@@ -373,36 +382,34 @@ static prop_bt *find_prop_bt(prop_bt *const bt, const char *name,
         }

         if (ret < 0) {
-            if (current->left) {
-                current = reinterpret_cast<prop_bt*>(to_prop_obj(current->left));
+            uint_least32_t left_offset = atomic_load_explicit(&current->left, memory_order_relaxed);
+            if (left_offset != 0) {
+                current = to_prop_bt(&current->left);
             } else {
                 if (!alloc_if_needed) {
                     return NULL;
                 }

-                // Note that there isn't a race condition here. "clients" never
-                // reach this code-path since It's only the (single threaded) server
-                // that allocates new nodes. Though "bt->left" is volatile, it can't
-                // have changed since the last value was last read.
-                uint32_t new_offset = 0;
+                uint_least32_t new_offset;
                 prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
                 if (new_bt) {
-                    current->left = new_offset;
+                    atomic_store_explicit(&current->left, new_offset, memory_order_release);
                 }
                 return new_bt;
             }
         } else {
-            if (current->right) {
-                current = reinterpret_cast<prop_bt*>(to_prop_obj(current->right));
+            uint_least32_t right_offset = atomic_load_explicit(&current->right, memory_order_relaxed);
+            if (right_offset != 0) {
+                current = to_prop_bt(&current->right);
             } else {
                 if (!alloc_if_needed) {
                     return NULL;
                 }

-                uint32_t new_offset;
+                uint_least32_t new_offset;
                 prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
                 if (new_bt) {
-                    current->right = new_offset;
+                    atomic_store_explicit(&current->right, new_offset, memory_order_release);
                 }
                 return new_bt;
             }
@@ -429,13 +436,14 @@ static const prop_info *find_property(prop_bt *const trie, const char *name,
         }

         prop_bt* root = NULL;
-        if (current->children) {
-            root = reinterpret_cast<prop_bt*>(to_prop_obj(current->children));
+        uint_least32_t children_offset = atomic_load_explicit(&current->children, memory_order_relaxed);
+        if (children_offset != 0) {
+            root = to_prop_bt(&current->children);
         } else if (alloc_if_needed) {
-            uint32_t new_bt_offset;
-            root = new_prop_bt(remaining_name, substr_size, &new_bt_offset);
+            uint_least32_t new_offset;
+            root = new_prop_bt(remaining_name, substr_size, &new_offset);
             if (root) {
-                current->children = new_bt_offset;
+                atomic_store_explicit(&current->children, new_offset, memory_order_release);
             }
         }

@@ -454,13 +462,14 @@ static const prop_info *find_property(prop_bt *const trie, const char *name,
         remaining_name = sep + 1;
     }

-    if (current->prop) {
-        return reinterpret_cast<prop_info*>(to_prop_obj(current->prop));
+    uint_least32_t prop_offset = atomic_load_explicit(&current->prop, memory_order_relaxed);
+    if (prop_offset != 0) {
+        return to_prop_info(&current->prop);
     } else if (alloc_if_needed) {
-        uint32_t new_info_offset;
-        prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_info_offset);
+        uint_least32_t new_offset;
+        prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_offset);
         if (new_info) {
-            current->prop = new_info_offset;
+            atomic_store_explicit(&current->prop, new_offset, memory_order_release);
         }

         return new_info;
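
One detail worth spelling out (an observation, not patch text): each "pointer" is loaded twice on the read path. The first, relaxed load only answers "does a child exist?", a zero/non-zero test with no dependent access, so it needs no ordering; the dereference then goes through to_prop_bt/to_prop_info, whose consume load pairs with the writer's release store. A condensed sketch of that read path, with hypothetical names:

    #include <atomic>
    #include <cstdint>

    static char area_data[4096];  // stands in for the mapped property area

    struct Node {
        std::atomic<std::uint_least32_t> left{0};
        // name bytes follow in the real prop_bt layout
    };

    Node* left_child(Node* n) {
        // Relaxed: only an existence check; the value is not dereferenced.
        if (n->left.load(std::memory_order_relaxed) == 0) return nullptr;
        // Consume: reloaded for the dereference, pairing with the writer's
        // release store so the child's contents are visible.
        std::uint_least32_t off = n->left.load(std::memory_order_consume);
        return reinterpret_cast<Node*>(area_data + off);
    }

Because the trie only ever grows, a published nonzero offset never changes back to zero, so the two loads cannot disagree in a harmful way.
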
@@ -534,31 +543,34 @@ static void find_nth_fn(const prop_info *pi, void *ptr)
     cookie->count++;
 }

-static int foreach_property(const uint32_t off,
+static int foreach_property(prop_bt *const trie,
         void (*propfn)(const prop_info *pi, void *cookie), void *cookie)
 {
-    prop_bt *trie = reinterpret_cast<prop_bt*>(to_prop_obj(off));
     if (!trie)
         return -1;

-    if (trie->left) {
-        const int err = foreach_property(trie->left, propfn, cookie);
+    uint_least32_t left_offset = atomic_load_explicit(&trie->left, memory_order_relaxed);
+    if (left_offset != 0) {
+        const int err = foreach_property(to_prop_bt(&trie->left), propfn, cookie);
         if (err < 0)
             return -1;
     }
-    if (trie->prop) {
-        prop_info *info = reinterpret_cast<prop_info*>(to_prop_obj(trie->prop));
+    uint_least32_t prop_offset = atomic_load_explicit(&trie->prop, memory_order_relaxed);
+    if (prop_offset != 0) {
+        prop_info *info = to_prop_info(&trie->prop);
         if (!info)
             return -1;
         propfn(info, cookie);
     }
-    if (trie->children) {
-        const int err = foreach_property(trie->children, propfn, cookie);
+    uint_least32_t children_offset = atomic_load_explicit(&trie->children, memory_order_relaxed);
+    if (children_offset != 0) {
+        const int err = foreach_property(to_prop_bt(&trie->children), propfn, cookie);
         if (err < 0)
             return -1;
     }
-    if (trie->right) {
-        const int err = foreach_property(trie->right, propfn, cookie);
+    uint_least32_t right_offset = atomic_load_explicit(&trie->right, memory_order_relaxed);
+    if (right_offset != 0) {
+        const int err = foreach_property(to_prop_bt(&trie->right), propfn, cookie);
         if (err < 0)
             return -1;
     }
@@ -766,5 +778,5 @@ int __system_property_foreach(void (*propfn)(const prop_info *pi, void *cookie),
         return __system_property_foreach_compat(propfn, cookie);
     }

-    return foreach_property(0, propfn, cookie);
+    return foreach_property(root_node(), propfn, cookie);
 }
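
For completeness: the public entry point now hands foreach_property the root node instead of offset 0, but nothing changes for callers. A sketch of consumer-side usage, assuming an Android target with the usual <sys/system_properties.h> declarations (PROP_NAME_MAX/PROP_VALUE_MAX buffer sizes as in that header):

    #include <stdio.h>
    #include <sys/system_properties.h>

    // Called once per property; 'pi' points into the shared property area
    // that foreach_property() walks.
    static void print_prop(const prop_info* pi, void* cookie) {
        char name[PROP_NAME_MAX];
        char value[PROP_VALUE_MAX];
        __system_property_read(pi, name, value);
        ++*static_cast<int*>(cookie);
        printf("%s=%s\n", name, value);
    }

    int main() {
        int count = 0;
        __system_property_foreach(print_prop, &count);
        printf("%d properties\n", count);
        return 0;
    }
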