[PATCH] lib + libdw: Add and use a concurrent version of the dynamic-size hash table.

Jonathon Anderson jma14@rice.edu
Mon Nov 4 17:03:00 GMT 2019


From: Srđan Milaković <sm108@rice.edu>

Signed-off-by: Srđan Milaković <sm108@rice.edu>
---
Apologies for the delay; things have been busy here the past week.

> On Fri, 2019-10-25 at 23:11 -0500, Jonathon Anderson wrote:
> > > I tried to simplify the code a little. You already observed that
> > > COMPARE can be zero. But it always is. We never try comparing values.
> > > So all the COMPARE and value passing to the find functions can simply
> > > be removed. So if you agree I would like to merge the attached
> > > simplification diff into this patch.
> >
> > I'm fine with it, although at a glance it seems that this means
> > insert_helper will never return -1. Which doesn't quite sound accurate,
> > so I'll have to defer to Srdan on this one.
>
> Srdan, any feedback on this?

Apologies; I talked with Srdan in person and forgot to relay the message. He
gave me an updated version of the hash table that handles that issue (and
fixes another previously harmless typo).
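
For the archives, the resulting convention in the table below: insert
returns 0 on success and -1 when an entry with the same hash value is
already present (in which case the fill counter is rolled back). A minimal
caller-side sketch using the libdw instantiation from this patch; the
fallback logic is only an illustration, not code from the patch:

    /* If another thread won the race to insert this abbrev code,
       fall back to the entry that is already in the table.  */
    if (Dwarf_Abbrev_Hash_insert (&cu->abbrev_hash, abb->code, abb) == -1)
      abb = Dwarf_Abbrev_Hash_find (&cu->abbrev_hash, abb->code);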

> On Mon, 2019-10-28 at 21:12 +0100, Mark Wielaard wrote:
> > On Sun, 2019-10-27 at 17:13 +0100, Mark Wielaard wrote:
> > > On Fri, 2019-10-25 at 23:11 -0500, Jonathon Anderson wrote:
> > > > A quick grep -r shows that ITERATE and REVERSE are used for the
> > > > asm_symbol_tab. If iteration and insertion are not concurrent we can
> > > > easily add bidirectional iteration (using the same Treiber stack-like
> > > > structure as used for the memory management). Also COMPARE is not
> > > > defined to be (0) in this instance.
> > >
> > > Yes. We would need some other solution for the libasm code. But I think
> > > we can use the concurrent table for everything in libdw.
> >
> > And everything in libdw besides the abbrev hash is the sig8 hash.
> > And adopting the concurrent dynamic size hash for that is almost
> > trivial. See attached.
> >
> > I only tested it lightly because I don't have any large projects built
> > with -fdebug-types-section. But it seems to work as intended.
>
> Any comment on this patch?

I'm a little wary of applying this to Sig8, only because the contents are
entire CUs (rather than just Abbrevs). It seems to work, though, so I just
need to make a more careful pass to convince myself that it works in
general too.
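
Concretely: each value in the Sig8 table is a struct Dwarf_CU *, keyed by
the 8-byte type signature, so the lookup in dwarf_formref_die (taken from
the hunk below) hands back a whole CU rather than a single Abbrev:

    uint64_t sig = read_8ubyte_unaligned (cu->dbg, attr->valp);
    cu = Dwarf_Sig8_Hash_find (&cu->dbg->sig8_hash, sig);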

This patch has the updated hash table and all the other suggested patches
integrated in; it compiles and passes both the test suite and our tests.
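
For anyone trying it out, the concurrent table keeps the textual
instantiation scheme of dynamicsizehash: define NAME and TYPE, then include
the header (and the .c in exactly one translation unit, as
dwarf_abbrev_hash.c does). A minimal usage sketch with a hypothetical
sample_hash instantiation; init and free must not race with other calls,
and hash values must be nonzero since zero marks an empty slot:

    #define NAME sample_hash
    #define TYPE struct sample *
    #include <dynamicsizehash_concurrent.h>

    /* s is a struct sample *, hval a nonzero hash value.  */
    sample_hash table;
    sample_hash_init (&table, 100);        /* Rounded up to a prime.  */
    sample_hash_insert (&table, hval, s);  /* 0, or -1 if hval is taken.  */
    struct sample *found = sample_hash_find (&table, hval);
    sample_hash_free (&table);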

 lib/ChangeLog                    |   5 +
 lib/Makefile.am                  |   4 +-
 lib/dynamicsizehash_concurrent.c | 482 +++++++++++++++++++++++++++++++
 lib/dynamicsizehash_concurrent.h | 118 ++++++++
 libdw/ChangeLog                  |  12 +
 libdw/dwarf_abbrev_hash.c        |   2 +-
 libdw/dwarf_abbrev_hash.h        |   3 +-
 libdw/dwarf_formref_die.c        |   2 +-
 libdw/dwarf_getabbrev.c          |   2 +-
 libdw/dwarf_sig8_hash.c          |   2 +-
 libdw/dwarf_sig8_hash.h          |   9 +-
 libdw/dwarf_tag.c                |   2 +-
 12 files changed, 632 insertions(+), 11 deletions(-)
 create mode 100644 lib/dynamicsizehash_concurrent.c
 create mode 100644 lib/dynamicsizehash_concurrent.h
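
For reviewers, a worked example of the double-hashing probe used by lookup
and insert_helper in the new file (size and hval chosen purely for
illustration): with size = 11 and hval = 25, the first probe is
idx = 1 + 25 % 11 = 4, the step is second_hash = 1 + 25 % (11 - 2) = 8,
and the wrap rule idx = (idx <= 8) ? 11 + idx - 8 : idx - 8 visits

    4, 7, 10, 2, 5, 8, 11, 3, 6, 9, 1

hitting every slot of 1..11 exactly once before repeating, since the table
size is prime.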

diff --git a/lib/ChangeLog b/lib/ChangeLog
index 3799c3aa..51c79841 100644
--- a/lib/ChangeLog
+++ b/lib/ChangeLog
@@ -1,3 +1,8 @@
+2019-08-25  Srđan Milaković  <sm108@rice.edu>
+
+	* dynamicsizehash_concurrent.{c,h}: New files.
+	* Makefile.am (noinst_HEADERS): Added dynamicsizehash_concurrent.h.
+
 2019-08-25  Jonathon Anderson  <jma14@rice.edu>
 
 	* stdatomic-fbsd.h: New file, taken from FreeBSD.
diff --git a/lib/Makefile.am b/lib/Makefile.am
index 3086cf06..97bf7329 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -39,8 +39,8 @@ libeu_a_SOURCES = xstrdup.c xstrndup.c xmalloc.c next_prime.c \
 
 noinst_HEADERS = fixedsizehash.h libeu.h system.h dynamicsizehash.h list.h \
 		 eu-config.h color.h printversion.h bpf.h \
-		 atomics.h stdatomic-fbsd.h
-EXTRA_DIST = dynamicsizehash.c
+		 atomics.h stdatomic-fbsd.h dynamicsizehash_concurrent.h
+EXTRA_DIST = dynamicsizehash.c dynamicsizehash_concurrent.c
 
 if !GPROF
 xmalloc_CFLAGS = -ffunction-sections
diff --git a/lib/dynamicsizehash_concurrent.c b/lib/dynamicsizehash_concurrent.c
new file mode 100644
index 00000000..2d53bec6
--- /dev/null
+++ b/lib/dynamicsizehash_concurrent.c
@@ -0,0 +1,482 @@
+/* Copyright (C) 2000-2019 Red Hat, Inc.
+   This file is part of elfutils.
+   Written by Srdan Milakovic <sm108@rice.edu>, 2019.
+   Derived from Ulrich Drepper <drepper@redhat.com>, 2000.
+
+   This file is free software; you can redistribute it and/or modify
+   it under the terms of either
+
+     * the GNU Lesser General Public License as published by the Free
+       Software Foundation; either version 3 of the License, or (at
+       your option) any later version
+
+   or
+
+     * the GNU General Public License as published by the Free
+       Software Foundation; either version 2 of the License, or (at
+       your option) any later version
+
+   or both in parallel, as here.
+
+   elfutils is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received copies of the GNU General Public License and
+   the GNU Lesser General Public License along with this program.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <system.h>
+#include <pthread.h>
+
+/* Before including this file the following macros must be defined:
+
+   NAME      name of the hash table structure.
+   TYPE      data type of the hash table entries
+ */
+
+
+static size_t
+lookup (NAME *htab, HASHTYPE hval)
+{
+  /* First hash function: simply take the modulus but prevent zero.  Small
+     values can skip the division, which helps performance when this is common.  */
+  size_t idx = 1 + (hval < htab->size ? hval : hval % htab->size);
+
+  HASHTYPE hash;
+
+  hash = atomic_load_explicit(&htab->table[idx].hashval,
+                              memory_order_acquire);
+  if (hash == hval)
+    return idx;
+  else if (hash == 0)
+    return 0;
+
+  /* Second hash function as suggested in [Knuth].  */
+  HASHTYPE second_hash = 1 + hval % (htab->size - 2);
+
+  for(;;)
+    {
+      if (idx <= second_hash)
+          idx = htab->size + idx - second_hash;
+      else
+          idx -= second_hash;
+
+      hash = atomic_load_explicit(&htab->table[idx].hashval,
+                                  memory_order_acquire);
+      if (hash == hval)
+	return idx;
+      else if (hash == 0)
+	return 0;
+    }
+}
+
+static int
+insert_helper (NAME *htab, HASHTYPE hval, TYPE val)
+{
+  /* First hash function: simply take the modulus but prevent zero.  Small
+     values can skip the division, which helps performance when this is common.  */
+  size_t idx = 1 + (hval < htab->size ? hval : hval % htab->size);
+
+  TYPE val_ptr;
+  HASHTYPE hash;
+
+  hash = atomic_load_explicit(&htab->table[idx].hashval,
+                              memory_order_acquire);
+  if (hash == hval)
+    return -1;
+  else if (hash == 0)
+    {
+      val_ptr = NULL;
+      atomic_compare_exchange_strong_explicit(&htab->table[idx].val_ptr,
+                                              (uintptr_t *) &val_ptr,
+                                              (uintptr_t) val,
+                                              memory_order_acquire,
+                                              memory_order_acquire);
+
+      if (val_ptr == NULL)
+        {
+          atomic_store_explicit(&htab->table[idx].hashval, hval,
+                                memory_order_release);
+          return 0;
+        }
+      else
+        {
+          do
+            {
+              hash = atomic_load_explicit(&htab->table[idx].hashval,
+                                          memory_order_acquire);
+            }
+          while (hash == 0);
+          if (hash == hval)
+            return -1;
+        }
+    }
+
+  /* Second hash function as suggested in [Knuth].  */
+  HASHTYPE second_hash = 1 + hval % (htab->size - 2);
+
+  for(;;)
+    {
+      if (idx <= second_hash)
+          idx = htab->size + idx - second_hash;
+      else
+          idx -= second_hash;
+
+      hash = atomic_load_explicit(&htab->table[idx].hashval,
+                                  memory_order_acquire);
+      if (hash == hval)
+        return -1;
+      else if (hash == 0)
+        {
+          val_ptr = NULL;
+          atomic_compare_exchange_strong_explicit(&htab->table[idx].val_ptr,
+                                                  (uintptr_t *) &val_ptr,
+                                                  (uintptr_t) val,
+                                                  memory_order_acquire,
+                                                  memory_order_acquire);
+
+          if (val_ptr == NULL)
+            {
+              atomic_store_explicit(&htab->table[idx].hashval, hval,
+                                    memory_order_release);
+              return 0;
+            }
+          else
+            {
+              do
+                {
+                  hash = atomic_load_explicit(&htab->table[idx].hashval,
+                                              memory_order_acquire);
+                }
+              while (hash == 0);
+              if (hash == hval)
+                return -1;
+            }
+        }
+    }
+}
+
+#define NO_RESIZING 0u
+#define ALLOCATING_MEMORY 1u
+#define MOVING_DATA 3u
+#define CLEANING 2u
+
+#define STATE_BITS 2u
+#define STATE_INCREMENT (1u << STATE_BITS)
+#define STATE_MASK (STATE_INCREMENT - 1)
+#define GET_STATE(A) ((A) & STATE_MASK)
+
+#define IS_NO_RESIZE_OR_CLEANING(A) (((A) & 0x1u) == 0)
+
+#define GET_ACTIVE_WORKERS(A) ((A) >> STATE_BITS)
+
+#define INITIALIZATION_BLOCK_SIZE 256
+#define MOVE_BLOCK_SIZE 256
+#define CEIL(A, B) (((A) + (B) - 1) / (B))
+
+/* Initializes records and copies the data from the old table.
+   It can share work with other threads.  */
+static void resize_helper(NAME *htab, int blocking)
+{
+  size_t num_old_blocks = CEIL(htab->old_size, MOVE_BLOCK_SIZE);
+  size_t num_new_blocks = CEIL(htab->size, INITIALIZATION_BLOCK_SIZE);
+
+  size_t my_block;
+  size_t num_finished_blocks = 0;
+
+  while ((my_block = atomic_fetch_add_explicit(&htab->next_init_block, 1,
+                                                memory_order_acquire))
+                                                    < num_new_blocks)
+    {
+      size_t record_it = my_block * INITIALIZATION_BLOCK_SIZE;
+      size_t record_end = (my_block + 1) * INITIALIZATION_BLOCK_SIZE;
+      if (record_end > htab->size)
+          record_end = htab->size;
+
+      while (record_it++ != record_end)
+        {
+          atomic_init(&htab->table[record_it].hashval, (uintptr_t) NULL);
+          atomic_init(&htab->table[record_it].val_ptr, (uintptr_t) NULL);
+        }
+
+      num_finished_blocks++;
+    }
+
+  atomic_fetch_add_explicit(&htab->num_initialized_blocks,
+                            num_finished_blocks, memory_order_release);
+  while (atomic_load_explicit(&htab->num_initialized_blocks,
+                              memory_order_acquire) != num_new_blocks);
+
+  /* All blocks are initialized, start moving.  */
+  num_finished_blocks = 0;
+  while ((my_block = atomic_fetch_add_explicit(&htab->next_move_block, 1,
+                                                memory_order_acquire))
+                                                    < num_old_blocks)
+    {
+      size_t record_it = my_block * MOVE_BLOCK_SIZE;
+      size_t record_end = (my_block + 1) * MOVE_BLOCK_SIZE;
+      if (record_end > htab->old_size)
+          record_end = htab->old_size;
+
+      while (record_it++ != record_end)
+        {
+          TYPE val_ptr = (TYPE) atomic_load_explicit(
+              &htab->old_table[record_it].val_ptr,
+              memory_order_acquire);
+          if (val_ptr == NULL)
+              continue;
+
+          HASHTYPE hashval = atomic_load_explicit(
+              &htab->old_table[record_it].hashval,
+              memory_order_acquire);
+          assert(hashval);
+
+          insert_helper(htab, hashval, val_ptr);
+        }
+
+      num_finished_blocks++;
+    }
+
+  atomic_fetch_add_explicit(&htab->num_moved_blocks, num_finished_blocks,
+                            memory_order_release);
+
+  if (blocking)
+      while (atomic_load_explicit(&htab->num_moved_blocks,
+                                  memory_order_acquire) != num_old_blocks);
+}
+
+static void
+resize_master(NAME *htab)
+{
+  htab->old_size = htab->size;
+  htab->old_table = htab->table;
+
+  htab->size = next_prime(htab->size * 2);
+  htab->table = malloc((1 + htab->size) * sizeof(htab->table[0]));
+  assert(htab->table);
+
+  /* Change state from ALLOCATING_MEMORY to MOVING_DATA */
+  atomic_fetch_xor_explicit(&htab->resizing_state,
+                            ALLOCATING_MEMORY ^ MOVING_DATA,
+                            memory_order_release);
+
+  resize_helper(htab, 1);
+
+  /* Change state from MOVING_DATA to CLEANING */
+  size_t resize_state = atomic_fetch_xor_explicit(&htab->resizing_state,
+                                                  MOVING_DATA ^ CLEANING,
+                                                  memory_order_acq_rel);
+  while (GET_ACTIVE_WORKERS(resize_state) != 0)
+      resize_state = atomic_load_explicit(&htab->resizing_state,
+                                          memory_order_acquire);
+
+  /* There are no more active workers.  */
+  atomic_store_explicit(&htab->next_init_block, 0, memory_order_relaxed);
+  atomic_store_explicit(&htab->num_initialized_blocks, 0,
+                        memory_order_relaxed);
+
+  atomic_store_explicit(&htab->next_move_block, 0, memory_order_relaxed);
+  atomic_store_explicit(&htab->num_moved_blocks, 0, memory_order_relaxed);
+
+  free(htab->old_table);
+
+  /* Change state to NO_RESIZING */
+  atomic_fetch_xor_explicit(&htab->resizing_state, CLEANING ^ NO_RESIZING,
+                            memory_order_relaxed);
+
+}
+
+static void
+resize_worker(NAME *htab)
+{
+  size_t resize_state = atomic_load_explicit(&htab->resizing_state,
+                                              memory_order_acquire);
+
+  /* If the resize has finished */
+  if (IS_NO_RESIZE_OR_CLEANING(resize_state))
+      return;
+
+  /* Register as worker and check if the resize has finished in the meantime.  */
+  resize_state = atomic_fetch_add_explicit(&htab->resizing_state,
+                                            STATE_INCREMENT,
+                                            memory_order_acquire);
+  if (IS_NO_RESIZE_OR_CLEANING(resize_state))
+    {
+      atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
+                                memory_order_relaxed);
+      return;
+    }
+
+  /* Wait while the new table is being allocated. */
+  while (GET_STATE(resize_state) == ALLOCATING_MEMORY)
+      resize_state = atomic_load_explicit(&htab->resizing_state,
+                                          memory_order_acquire);
+
+  /* Check if the resize is done */
+  assert(GET_STATE(resize_state) != NO_RESIZING);
+  if (GET_STATE(resize_state) == CLEANING)
+    {
+      atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
+                                memory_order_relaxed);
+      return;
+    }
+
+  resize_helper(htab, 0);
+
+  /* Deregister worker */
+  atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
+                            memory_order_release);
+}
+
+
+int
+#define INIT(name) _INIT (name)
+#define _INIT(name) \
+  name##_init
+INIT(NAME) (NAME *htab, size_t init_size)
+{
+  /* We need the size to be a prime.  */
+  init_size = next_prime (init_size);
+
+  /* Initialize the data structure.  */
+  htab->size = init_size;
+  atomic_init(&htab->filled, 0);
+  atomic_init(&htab->resizing_state, 0);
+
+  atomic_init(&htab->next_init_block, 0);
+  atomic_init(&htab->num_initialized_blocks, 0);
+
+  atomic_init(&htab->next_move_block, 0);
+  atomic_init(&htab->num_moved_blocks, 0);
+
+  pthread_rwlock_init(&htab->resize_rwl, NULL);
+
+  htab->table = (void *) malloc ((init_size + 1) * sizeof (htab->table[0]));
+  if (htab->table == NULL)
+      return -1;
+
+  for (size_t i = 0; i <= init_size; i++)
+    {
+      atomic_init(&htab->table[i].hashval, (uintptr_t) NULL);
+      atomic_init(&htab->table[i].val_ptr, (uintptr_t) NULL);
+    }
+
+  return 0;
+}
+
+
+int
+#define FREE(name) _FREE (name)
+#define _FREE(name) \
+name##_free
+FREE(NAME) (NAME *htab)
+{
+  pthread_rwlock_destroy(&htab->resize_rwl);
+  free (htab->table);
+  return 0;
+}
+
+
+int
+#define INSERT(name) _INSERT (name)
+#define _INSERT(name) \
+name##_insert
+INSERT(NAME) (NAME *htab, HASHTYPE hval, TYPE data)
+{
+  int incremented = 0;
+
+  for(;;)
+    {
+      while (pthread_rwlock_tryrdlock(&htab->resize_rwl) != 0)
+          resize_worker(htab);
+
+      size_t filled;
+      if (!incremented)
+        {
+          filled = atomic_fetch_add_explicit(&htab->filled, 1,
+                                              memory_order_acquire);
+          incremented = 1;
+        }
+      else
+        {
+          filled = atomic_load_explicit(&htab->filled,
+                                        memory_order_acquire);
+        }
+
+
+      if (100 * filled > 90 * htab->size)
+        {
+          /* Table is filled more than 90%.  Resize the table.  */
+
+          size_t resizing_state = atomic_load_explicit(&htab->resizing_state,
+                                                        memory_order_acquire);
+          if (resizing_state == 0 &&
+              atomic_compare_exchange_strong_explicit(&htab->resizing_state,
+                                                      &resizing_state,
+                                                      ALLOCATING_MEMORY,
+                                                      memory_order_acquire,
+                                                      memory_order_acquire))
+            {
+              /* Master thread */
+              pthread_rwlock_unlock(&htab->resize_rwl);
+
+              pthread_rwlock_wrlock(&htab->resize_rwl);
+              resize_master(htab);
+              pthread_rwlock_unlock(&htab->resize_rwl);
+
+            }
+          else
+            {
+              /* Worker thread */
+              pthread_rwlock_unlock(&htab->resize_rwl);
+              resize_worker(htab);
+            }
+        }
+      else
+        {
+          /* Lock acquired, no need to resize.  */
+          break;
+        }
+    }
+
+  int ret_val = insert_helper(htab, hval, data);
+  if (ret_val == -1)
+      atomic_fetch_sub_explicit(&htab->filled, 1, memory_order_relaxed);
+  pthread_rwlock_unlock(&htab->resize_rwl);
+  return ret_val;
+}
+
+
+
+TYPE
+#define FIND(name) _FIND (name)
+#define _FIND(name) \
+  name##_find
+FIND(NAME) (NAME *htab, HASHTYPE hval)
+{
+  while (pthread_rwlock_tryrdlock(&htab->resize_rwl) != 0)
+      resize_worker(htab);
+
+  size_t idx;
+
+  /* Make the hash data nonzero.  */
+  hval = hval ?: 1;
+  idx = lookup(htab, hval);
+
+  if (idx == 0)
+    {
+      pthread_rwlock_unlock(&htab->resize_rwl);
+      return NULL;
+    }
+
+  /* Get a copy before unlocking the lock.  */
+  TYPE ret_val = (TYPE) atomic_load_explicit(&htab->table[idx].val_ptr,
+                                             memory_order_relaxed);
+
+  pthread_rwlock_unlock(&htab->resize_rwl);
+  return ret_val;
+}
diff --git a/lib/dynamicsizehash_concurrent.h b/lib/dynamicsizehash_concurrent.h
new file mode 100644
index 00000000..73e66e91
--- /dev/null
+++ b/lib/dynamicsizehash_concurrent.h
@@ -0,0 +1,118 @@
+/* Copyright (C) 2000-2019 Red Hat, Inc.
+   This file is part of elfutils.
+   Written by Srdan Milakovic <sm108@rice.edu>, 2019.
+   Derived from Ulrich Drepper <drepper@redhat.com>, 2000.
+
+   This file is free software; you can redistribute it and/or modify
+   it under the terms of either
+
+     * the GNU Lesser General Public License as published by the Free
+       Software Foundation; either version 3 of the License, or (at
+       your option) any later version
+
+   or
+
+     * the GNU General Public License as published by the Free
+       Software Foundation; either version 2 of the License, or (at
+       your option) any later version
+
+   or both in parallel, as here.
+
+   elfutils is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received copies of the GNU General Public License and
+   the GNU Lesser General Public License along with this program.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <stddef.h>
+#include <pthread.h>
+#include "atomics.h"
+/* Before including this file the following macros must be defined:
+
+   NAME      name of the hash table structure.
+   TYPE      data type of the hash table entries
+
+   The following macros if present select features:
+
+   HASHTYPE  integer type for hash values, default unsigned long int
+
+   (The ITERATE feature of dynamicsizehash is not supported here.)  */
+
+
+
+#ifndef HASHTYPE
+# define HASHTYPE unsigned long int
+#endif
+
+#ifndef RESIZE_BLOCK_SIZE
+# define RESIZE_BLOCK_SIZE 256
+#endif
+
+/* Defined separately.  */
+extern size_t next_prime (size_t seed);
+
+
+/* Table entry type.  */
+#define _DYNHASHCONENTTYPE(name)       \
+  typedef struct name##_ent         \
+  {                                 \
+    _Atomic(HASHTYPE) hashval;      \
+    atomic_uintptr_t val_ptr;       \
+  } name##_ent
+#define DYNHASHENTTYPE(name) _DYNHASHCONENTTYPE (name)
+DYNHASHENTTYPE (NAME);
+
+/* Type of the dynamic hash table data structure.  */
+#define _DYNHASHCONTYPE(name) \
+typedef struct                                     \
+{                                                  \
+  size_t size;                                     \
+  size_t old_size;                                 \
+  atomic_size_t filled;                            \
+  name##_ent *table;                               \
+  name##_ent *old_table;                           \
+  atomic_size_t resizing_state;                    \
+  atomic_size_t next_init_block;                   \
+  atomic_size_t num_initialized_blocks;            \
+  atomic_size_t next_move_block;                   \
+  atomic_size_t num_moved_blocks;                  \
+  pthread_rwlock_t resize_rwl;                     \
+} name
+#define DYNHASHTYPE(name) _DYNHASHCONTYPE (name)
+DYNHASHTYPE (NAME);
+
+
+
+#define _FUNCTIONS(name)                                            \
+/* Initialize the hash table.  */                                   \
+extern int name##_init (name *htab, size_t init_size);              \
+                                                                    \
+/* Free resources allocated for hash table.  */                     \
+extern int name##_free (name *htab);                                \
+                                                                    \
+/* Insert new entry.  */                                            \
+extern int name##_insert (name *htab, HASHTYPE hval, TYPE data);    \
+                                                                    \
+/* Find entry in hash table.  */                                    \
+extern TYPE name##_find (name *htab, HASHTYPE hval);
+#define FUNCTIONS(name) _FUNCTIONS (name)
+FUNCTIONS (NAME)
+
+
+#ifndef NO_UNDEF
+# undef DYNHASHENTTYPE
+# undef DYNHASHTYPE
+# undef FUNCTIONS
+# undef _FUNCTIONS
+# undef XFUNCTIONS
+# undef _XFUNCTIONS
+# undef NAME
+# undef TYPE
+# undef ITERATE
+# undef COMPARE
+# undef FIRST
+# undef NEXT
+#endif
diff --git a/libdw/ChangeLog b/libdw/ChangeLog
index 036cee61..927793f9 100644
--- a/libdw/ChangeLog
+++ b/libdw/ChangeLog
@@ -1,3 +1,15 @@
+2019-10-28  Mark Wielaard  <mark@klomp.org>
+
+	* dwarf_sig8_hash.h: Include libdw.h. Remove COMPARE. Include
+	dynamicsizehash_concurrent.h.
+	* dwarf_sig8_hash.c: Include dynamicsizehash_concurrent.c.
+	* dwarf_formref_die.c (dwarf_formref_die): Drop NULL argument to
+	Dwarf_Sig8_Hash_find.
+
+2019-08-26  Srđan Milaković  <sm108@rice.edu>
+
+	* dwarf_abbrev_hash.{c,h}: Use the *_concurrent hash table.
+
 2019-10-28  Jonathon Anderson  <jma14@rice.edu>
 
 	* libdw_alloc.c: Added __libdw_alloc_tail.
diff --git a/libdw/dwarf_abbrev_hash.c b/libdw/dwarf_abbrev_hash.c
index f52f5ad5..c2548140 100644
--- a/libdw/dwarf_abbrev_hash.c
+++ b/libdw/dwarf_abbrev_hash.c
@@ -38,7 +38,7 @@
 #define next_prime __libdwarf_next_prime
 extern size_t next_prime (size_t) attribute_hidden;
 
-#include <dynamicsizehash.c>
+#include <dynamicsizehash_concurrent.c>
 
 #undef next_prime
 #define next_prime attribute_hidden __libdwarf_next_prime
diff --git a/libdw/dwarf_abbrev_hash.h b/libdw/dwarf_abbrev_hash.h
index d2f02ccc..a368c598 100644
--- a/libdw/dwarf_abbrev_hash.h
+++ b/libdw/dwarf_abbrev_hash.h
@@ -32,8 +32,7 @@
 
 #define NAME Dwarf_Abbrev_Hash
 #define TYPE Dwarf_Abbrev *
-#define COMPARE(a, b) (0)
 
-#include <dynamicsizehash.h>
+#include <dynamicsizehash_concurrent.h>
 
 #endif	/* dwarf_abbrev_hash.h */
diff --git a/libdw/dwarf_formref_die.c b/libdw/dwarf_formref_die.c
index f196331a..48ba8194 100644
--- a/libdw/dwarf_formref_die.c
+++ b/libdw/dwarf_formref_die.c
@@ -83,7 +83,7 @@ dwarf_formref_die (Dwarf_Attribute *attr, Dwarf_Die *result)
 	 have to match in the type unit headers.  */
 
       uint64_t sig = read_8ubyte_unaligned (cu->dbg, attr->valp);
-      cu = Dwarf_Sig8_Hash_find (&cu->dbg->sig8_hash, sig, NULL);
+      cu = Dwarf_Sig8_Hash_find (&cu->dbg->sig8_hash, sig);
       if (cu == NULL)
 	{
 	  /* Not seen before.  We have to scan through the type units.
diff --git a/libdw/dwarf_getabbrev.c b/libdw/dwarf_getabbrev.c
index 6a7e981b..7e767fc1 100644
--- a/libdw/dwarf_getabbrev.c
+++ b/libdw/dwarf_getabbrev.c
@@ -83,7 +83,7 @@ __libdw_getabbrev (Dwarf *dbg, struct Dwarf_CU *cu, Dwarf_Off offset,
   bool foundit = false;
   Dwarf_Abbrev *abb = NULL;
   if (cu == NULL
-      || (abb = Dwarf_Abbrev_Hash_find (&cu->abbrev_hash, code, NULL)) == NULL)
+      || (abb = Dwarf_Abbrev_Hash_find (&cu->abbrev_hash, code)) == NULL)
     {
       if (result == NULL)
 	abb = libdw_typed_alloc (dbg, Dwarf_Abbrev);
diff --git a/libdw/dwarf_sig8_hash.c b/libdw/dwarf_sig8_hash.c
index 043cac78..777f9ebc 100644
--- a/libdw/dwarf_sig8_hash.c
+++ b/libdw/dwarf_sig8_hash.c
@@ -38,4 +38,4 @@
 #define next_prime __libdwarf_next_prime
 extern size_t next_prime (size_t) attribute_hidden;
 
-#include <dynamicsizehash.c>
+#include <dynamicsizehash_concurrent.c>
diff --git a/libdw/dwarf_sig8_hash.h b/libdw/dwarf_sig8_hash.h
index 705ffbcd..c399919a 100644
--- a/libdw/dwarf_sig8_hash.h
+++ b/libdw/dwarf_sig8_hash.h
@@ -29,10 +29,15 @@
 #ifndef _DWARF_SIG8_HASH_H
 #define _DWARF_SIG8_HASH_H	1
 
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "libdw.h"
+
 #define NAME Dwarf_Sig8_Hash
 #define TYPE struct Dwarf_CU *
-#define COMPARE(a, b) (0)
 
-#include <dynamicsizehash.h>
+#include <dynamicsizehash_concurrent.h>
 
 #endif	/* dwarf_sig8_hash.h */
diff --git a/libdw/dwarf_tag.c b/libdw/dwarf_tag.c
index 331eaa0d..d784970c 100644
--- a/libdw/dwarf_tag.c
+++ b/libdw/dwarf_tag.c
@@ -45,7 +45,7 @@ __libdw_findabbrev (struct Dwarf_CU *cu, unsigned int code)
     return DWARF_END_ABBREV;
 
   /* See whether the entry is already in the hash table.  */
-  abb = Dwarf_Abbrev_Hash_find (&cu->abbrev_hash, code, NULL);
+  abb = Dwarf_Abbrev_Hash_find (&cu->abbrev_hash, code);
   if (abb == NULL)
     while (cu->last_abbrev_offset != (size_t) -1l)
       {
-- 
2.24.0


