/*-
 * Copyright (c) 2014--2019 Taylor R. Campbell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * NOTE(review): the seven header names were lost in extraction; these are
 * reconstructed from the identifiers the code uses (rb_tree_*, err, errno,
 * pthread_*, bool, offsetof/ptrdiff_t, memcmp/memcpy) -- confirm against
 * the original source.
 */
#include <sys/rbtree.h>

#include <err.h>
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/*
 * Tried rwlock but it is a total loss.  Spin lock keeps the operations
 * balanced but has low throughput; mutex gives reasonable throughput
 * for lookups but throttles modifications.
*/ #ifdef RBTREE_SPINLOCK #define pthread_lock_t pthread_spinlock_t #define pthread_lock_init(l) pthread_spin_init(l,0) #define pthread_lock_destroy pthread_spin_destroy #define pthread_lock pthread_spin_lock #define pthread_unlock pthread_spin_unlock #else #define pthread_lock_t pthread_mutex_t #define pthread_lock_init(l) pthread_mutex_init(l,NULL) #define pthread_lock_destroy pthread_mutex_destroy #define pthread_lock pthread_mutex_lock #define pthread_unlock pthread_mutex_unlock #endif struct ent; struct map { rb_tree_ops_t m_rbtree_ops; struct rb_tree m_rbtree; pthread_lock_t m_lock; ptrdiff_t m_keyoff; size_t m_keysz; }; struct ent { struct rb_node ent_rbnode; }; struct map_read { char mr_dummy; }; struct map_txn_delete { char mtd_dummy; }; static int map_rbtree_compare_nodes(void *cookie, const void *a, const void *b) { struct map *M = cookie; const void *ka = (const char *)a + M->m_keyoff; const void *kb = (const char *)b + M->m_keyoff; return memcmp(ka, kb, M->m_keysz); } static int map_rbtree_compare_key(void *cookie, const void *n, const void *k) { struct map *M = cookie; const void *nk = (const char *)n + M->m_keyoff; return memcmp(nk, k, M->m_keysz); } static const rb_tree_ops_t map_rbtree_ops_template = { .rbto_compare_nodes = &map_rbtree_compare_nodes, .rbto_compare_key = &map_rbtree_compare_key, .rbto_node_offset = offsetof(struct ent, ent_rbnode), }; static void map_init(struct map *M, ptrdiff_t keyoff, size_t keysz) { int error; memcpy(&M->m_rbtree_ops, &map_rbtree_ops_template, sizeof M->m_rbtree_ops); M->m_rbtree_ops.rbto_context = M; rb_tree_init(&M->m_rbtree, &M->m_rbtree_ops); error = pthread_lock_init(&M->m_lock); if (error) { errno = error; err(1, "pthread_lock_init"); } M->m_keyoff = keyoff; M->m_keysz = keysz; } static void map_destroy(struct map *M) { pthread_lock_destroy(&M->m_lock); #if 0 /* no rb_tree_destroy */ rb_tree_destroy(&M->m_rbtree); #endif } static void map_register_thread(struct map *M __unused) { } static void 
map_deregister_thread(struct map *M __unused) { } static void map_read_enter(struct map *M, struct map_read *mr __unused) { int error; error = pthread_lock(&M->m_lock); if (error) { errno = error; err(1, "pthread_lock"); } } static void map_read_exit(struct map *M, struct map_read *mr __unused) { int error; error = pthread_unlock(&M->m_lock); if (error) { errno = error; err(1, "pthread_unlock"); } } static struct ent * map_lookup(struct map *M, const void *key) { return rb_tree_find_node(&M->m_rbtree, key); } static bool map_insert(struct map *M, const void *key __unused, struct ent *ent) { int error; bool ok; if ((error = pthread_lock(&M->m_lock)) != 0) { errno = error; err(1, "pthread_lock"); } ok = (rb_tree_insert_node(&M->m_rbtree, ent) == ent); if ((error = pthread_unlock(&M->m_lock)) != 0) { errno = error; err(1, "pthread_unlock"); } return ok; } static struct ent * map_delete(struct map *M, const void *key) { struct ent *ent; int error; if ((error = pthread_lock(&M->m_lock)) != 0) { errno = error; err(1, "pthread_wrlock"); } ent = rb_tree_find_node(&M->m_rbtree, key); if (ent != NULL) rb_tree_remove_node(&M->m_rbtree, ent); if ((error = pthread_unlock(&M->m_lock)) != 0) { errno = error; err(1, "pthread_unlock"); } return ent; } static void map_txn_delete_begin(struct map *M, struct map_txn_delete *T __unused, size_t n __unused) { int error; if ((error = pthread_lock(&M->m_lock)) != 0) { errno = error; err(1, "pthread_wrlock"); } } static void map_txn_delete_commit(struct map *M, struct map_txn_delete *T __unused) { int error; if ((error = pthread_unlock(&M->m_lock)) != 0) { errno = error; err(1, "pthread_unlock"); } } static struct ent * map_txn_delete(struct map *M, const void *key, struct map_txn_delete *T __unused) { struct ent *ent; ent = rb_tree_find_node(&M->m_rbtree, key); if (ent != NULL) rb_tree_remove_node(&M->m_rbtree, ent); return ent; } #include "stress.i"