This is the mail archive of the elfutils-devel@sourceware.org mailing list for the elfutils project.



[PATCH 3/3] lib + libdw: Add and use a concurrent version of the dynamic-size hash table.


From: Srđan Milaković <sm108@rice.edu>
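
Add a concurrent variant of the dynamic-size hash table, derived from
lib/dynamicsizehash.{c,h}, and use it for the Dwarf_Abbrev hash in
libdw.  Lookup and insertion use the same double-hashing probe sequence
as the original table, but entries are published with C11 atomics so
multiple threads can insert and find concurrently.  Once the table is
more than 90% full, one thread takes the resize rwlock for writing and
becomes the resize master; other threads that notice the resize in
progress register as workers and help initialize the new table and move
entries over in blocks.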

Signed-off-by: Srđan Milaković <sm108@rice.edu>
---
 lib/ChangeLog                    |   5 +
 lib/Makefile.am                  |   4 +-
 lib/dynamicsizehash_concurrent.c | 522 +++++++++++++++++++++++++++++++
 lib/dynamicsizehash_concurrent.h | 118 +++++++
 libdw/ChangeLog                  |   4 +
 libdw/dwarf_abbrev_hash.c        |   2 +-
 libdw/dwarf_abbrev_hash.h        |   2 +-
 7 files changed, 653 insertions(+), 4 deletions(-)
 create mode 100644 lib/dynamicsizehash_concurrent.c
 create mode 100644 lib/dynamicsizehash_concurrent.h

diff --git a/lib/ChangeLog b/lib/ChangeLog
index 3799c3aa..a209729a 100644
--- a/lib/ChangeLog
+++ b/lib/ChangeLog
@@ -1,3 +1,8 @@
+2019-08-26  Srđan Milaković  <sm108@rice.edu>
+
+	* dynamicsizehash_concurrent.{c,h}: New files.
+	* Makefile.am (noinst_HEADERS): Added dynamicsizehash_concurrent.h.
+
 2019-08-25  Jonathon Anderson  <jma14@rice.edu>
 
 	* stdatomic-fbsd.h: New file, taken from FreeBSD.
diff --git a/lib/Makefile.am b/lib/Makefile.am
index 3086cf06..97bf7329 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -39,8 +39,8 @@ libeu_a_SOURCES = xstrdup.c xstrndup.c xmalloc.c next_prime.c \
 
 noinst_HEADERS = fixedsizehash.h libeu.h system.h dynamicsizehash.h list.h \
 		 eu-config.h color.h printversion.h bpf.h \
-		 atomics.h stdatomic-fbsd.h
-EXTRA_DIST = dynamicsizehash.c
+		 atomics.h stdatomic-fbsd.h dynamicsizehash_concurrent.h
+EXTRA_DIST = dynamicsizehash.c dynamicsizehash_concurrent.c
 
 if !GPROF
 xmalloc_CFLAGS = -ffunction-sections
diff --git a/lib/dynamicsizehash_concurrent.c b/lib/dynamicsizehash_concurrent.c
new file mode 100644
index 00000000..d645b143
--- /dev/null
+++ b/lib/dynamicsizehash_concurrent.c
@@ -0,0 +1,522 @@
+/* Copyright (C) 2000-2019 Red Hat, Inc.
+   This file is part of elfutils.
+   Written by Srdan Milakovic <sm108@rice.edu>, 2019.
+   Derived from Ulrich Drepper <drepper@redhat.com>, 2000.
+
+   This file is free software; you can redistribute it and/or modify
+   it under the terms of either
+
+     * the GNU Lesser General Public License as published by the Free
+       Software Foundation; either version 3 of the License, or (at
+       your option) any later version
+
+   or
+
+     * the GNU General Public License as published by the Free
+       Software Foundation; either version 2 of the License, or (at
+       your option) any later version
+
+   or both in parallel, as here.
+
+   elfutils is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received copies of the GNU General Public License and
+   the GNU Lesser General Public License along with this program.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <system.h>
+#include <pthread.h>
+
+/* Before including this file the following macros must be defined:
+
+   NAME      name of the hash table structure.
+   TYPE      data type of the hash table entries
+   COMPARE   comparison function taking two pointers to TYPE objects
+
+   The following macros if present select features:
+
+   HASHTYPE  integer type for hash values, default unsigned long int
+ */
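+
+/* A minimal usage sketch, mirroring libdw/dwarf_abbrev_hash.{h,c} further
+   down in this patch.  A COMPARE of 0 means entries are identified by
+   their hash value alone:
+
+     #define NAME Dwarf_Abbrev_Hash
+     #define TYPE Dwarf_Abbrev *
+     #define COMPARE(a, b) (0)
+
+     #include <dynamicsizehash_concurrent.h>  (declares Dwarf_Abbrev_Hash_*)
+     #include <dynamicsizehash_concurrent.c>  (defines those functions)
+ */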
+
+
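+/* Look up the entry matching HVAL (and VAL, when COMPARE is nontrivial).
+   Returns the index of the entry in the table, or 0 if there is none.
+   Probing starts at index 1, so index 0 can serve as the "not found"
+   sentinel.  */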
+static size_t
+lookup (NAME *htab, HASHTYPE hval, TYPE val __attribute__ ((unused)))
+{
+  /* First hash function: simply take the modulo but prevent zero.  Small
+     values can skip the division, which helps performance when this is
+     common.  */
+  size_t idx = 1 + (hval < htab->size ? hval : hval % htab->size);
+
+#if COMPARE != 0  /* A handful of tables don't actually compare the entries
+                     in the table; they instead rely on the hash.  In that
+                     case we can skip the parts that relate to the value.  */
+  TYPE val_ptr;
+#endif
+  HASHTYPE hash;
+
+  hash = atomic_load_explicit(&htab->table[idx].hashval,
+                              memory_order_acquire);
+  if (hash == hval)
+    {
+#if COMPARE == 0
+      return idx;
+#else
+      val_ptr = (TYPE) atomic_load_explicit(&htab->table[idx].val_ptr,
+                                            memory_order_acquire);
+      if (COMPARE(val_ptr, val) == 0)
+          return idx;
+#endif
+    }
+  else if (hash == 0)
+    {
+      return 0;
+    }
+
+  /* Second hash function as suggested in [Knuth].  */
+  HASHTYPE second_hash = 1 + hval % (htab->size - 2);
+
+  for(;;)
+    {
+      if (idx <= second_hash)
+          idx = htab->size + idx - second_hash;
+      else
+          idx -= second_hash;
+
+      hash = atomic_load_explicit(&htab->table[idx].hashval,
+                                  memory_order_acquire);
+      if (hash == hval)
+        {
+#if COMPARE == 0
+          return idx;
+#else
+          val_ptr = (TYPE) atomic_load_explicit(&htab->table[idx].val_ptr,
+                                                memory_order_acquire);
+          if (COMPARE(val_ptr, val) == 0)
+              return idx;
+#endif
+        }
+      else if (hash == 0)
+        {
+          return 0;
+        }
+    }
+}
+
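+/* Insert VAL with hash HVAL.  Returns 0 once VAL has been placed in a
+   free slot (claiming the slot's val_ptr with a compare-and-swap, then
+   publishing hashval with a release store), or -1 if a slot for HVAL
+   already holds a value that COMPARE reports as different.  */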
+static int
+insert_helper (NAME *htab, HASHTYPE hval, TYPE val)
+{
+  /* First hash function: simply take the modulo but prevent zero.  Small
+     values can skip the division, which helps performance when this is
+     common.  */
+  size_t idx = 1 + (hval < htab->size ? hval : hval % htab->size);
+
+  TYPE val_ptr;
+  HASHTYPE hash;
+
+  hash = atomic_load_explicit(&htab->table[idx].hashval,
+                              memory_order_acquire);
+  if (hash == hval)
+    {
+      val_ptr = (TYPE) atomic_load_explicit(&htab->table[idx].val_ptr,
+                                            memory_order_acquire);
+      if (COMPARE(val_ptr, val) != 0)
+          return -1;
+    }
+  else if (hash == 0)
+    {
+      val_ptr = NULL;
+      atomic_compare_exchange_strong_explicit(&htab->table[idx].val_ptr,
+                                              (uintptr_t *) &val_ptr,
+                                              (uintptr_t) val,
+                                              memory_order_acquire,
+                                              memory_order_acquire);
+
+      if (val_ptr == NULL)
+        {
+          atomic_store_explicit(&htab->table[idx].hashval, hval,
+                                memory_order_release);
+          return 0;
+        }
+      else
+        {
+          /* Lost the race to claim the slot; wait until the winner has
+             published its hashval before probing further.  */
+          do
+            {
+              hash = atomic_load_explicit(&htab->table[idx].hashval,
+                                          memory_order_acquire);
+            }
+          while (hash == 0);
+        }
+    }
+
+  /* Second hash function as suggested in [Knuth].  */
+  HASHTYPE second_hash = 1 + hval % (htab->size - 2);
+
+  for(;;)
+    {
+      if (idx <= second_hash)
+          idx = htab->size + idx - second_hash;
+      else
+          idx -= second_hash;
+
+      hash = atomic_load_explicit(&htab->table[idx].hashval,
+                                  memory_order_acquire);
+      if (hash == hval)
+        {
+          val_ptr = (TYPE) atomic_load_explicit(&htab->table[idx].val_ptr,
+                                                memory_order_acquire);
+          if (COMPARE(val_ptr, val) != 0)
+              return -1;
+        }
+      else if (hash == 0)
+        {
+          val_ptr = NULL;
+          atomic_compare_exchange_strong_explicit(&htab->table[idx].val_ptr,
+                                                  (uintptr_t *) &val_ptr,
+                                                  (uintptr_t) val,
+                                                  memory_order_acquire,
+                                                  memory_order_acquire);
+
+          if (val_ptr == NULL)
+            {
+              atomic_store_explicit(&htab->table[idx].hashval, hval,
+                                    memory_order_release);
+              return 0;
+            }
+          else
+            {
+              /* Lost the race to claim the slot; wait until the winner
+                 has published its hashval before probing further.  */
+              do
+                {
+                  hash = atomic_load_explicit(&htab->table[idx].hashval,
+                                              memory_order_acquire);
+                }
+              while (hash == 0);
+            }
+        }
+    }
+}
+
+#define NO_RESIZING 0u
+#define ALLOCATING_MEMORY 1u
+#define MOVING_DATA 3u
+#define CLEANING 2u
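+
+/* The two low bits of resizing_state encode the phase; the remaining
+   bits count the registered workers.  ALLOCATING_MEMORY (01) and
+   MOVING_DATA (11) are the only phases with the low bit set, which is
+   what IS_NO_RESIZE_OR_CLEANING tests.  */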
+
+#define STATE_BITS 2u
+#define STATE_INCREMENT (1u << STATE_BITS)
+#define STATE_MASK (STATE_INCREMENT - 1)
+#define GET_STATE(A) ((A) & STATE_MASK)
+
+#define IS_NO_RESIZE_OR_CLEANING(A) (((A) & 0x1u) == 0)
+
+#define GET_ACTIVE_WORKERS(A) ((A) >> STATE_BITS)
+
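+/* Records are initialized and moved in blocks of the sizes below so the
+   resize work can be split among threads.  */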
+#define INITIALIZATION_BLOCK_SIZE 256
+#define MOVE_BLOCK_SIZE 256
+#define CEIL(A, B) (((A) + (B) - 1) / (B))
+
+/* Initializes the records of the new table and copies the entries over
+   from the old one.  The work is split into blocks and can be shared
+   among threads.  If BLOCKING, waits until every old block has been
+   moved before returning.  */
+static void
+resize_helper (NAME *htab, int blocking)
+{
+  size_t num_old_blocks = CEIL(htab->old_size, MOVE_BLOCK_SIZE);
+  size_t num_new_blocks = CEIL(htab->size, INITIALIZATION_BLOCK_SIZE);
+
+  size_t my_block;
+  size_t num_finished_blocks = 0;
+
+  while ((my_block = atomic_fetch_add_explicit(&htab->next_init_block, 1,
+                                                memory_order_acquire))
+                                                    < num_new_blocks)
+    {
+      size_t record_it = my_block * INITIALIZATION_BLOCK_SIZE;
+      size_t record_end = (my_block + 1) * INITIALIZATION_BLOCK_SIZE;
+      if (record_end > htab->size)
+          record_end = htab->size;
+
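+      /* The increment in the loop condition means slots record_it + 1
+         through record_end are initialized; slot 0 of the one-based
+         table is never probed, so it needs no initialization here.  */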
+      while (record_it++ != record_end)
+        {
+          atomic_init(&htab->table[record_it].hashval, (uintptr_t) NULL);
+          atomic_init(&htab->table[record_it].val_ptr, (uintptr_t) NULL);
+        }
+
+      num_finished_blocks++;
+    }
+
+  atomic_fetch_add_explicit(&htab->num_initialized_blocks,
+                            num_finished_blocks, memory_order_release);
+  while (atomic_load_explicit(&htab->num_initialized_blocks,
+                              memory_order_acquire) != num_new_blocks);
+
+  /* All blocks are initialized; start moving.  */
+  num_finished_blocks = 0;
+  while ((my_block = atomic_fetch_add_explicit(&htab->next_move_block, 1,
+                                                memory_order_acquire))
+                                                    < num_old_blocks)
+    {
+      size_t record_it = my_block * MOVE_BLOCK_SIZE;
+      size_t record_end = (my_block + 1) * MOVE_BLOCK_SIZE;
+      if (record_end > htab->old_size)
+          record_end = htab->old_size;
+
+      while (record_it++ != record_end)
+        {
+          TYPE val_ptr = (TYPE) atomic_load_explicit(
+              &htab->old_table[record_it].val_ptr,
+              memory_order_acquire);
+          if (val_ptr == NULL)
+              continue;
+
+          HASHTYPE hashval = atomic_load_explicit(
+              &htab->old_table[record_it].hashval,
+              memory_order_acquire);
+          assert(hashval);
+
+          insert_helper(htab, hashval, val_ptr);
+        }
+
+      num_finished_blocks++;
+    }
+
+  atomic_fetch_add_explicit(&htab->num_moved_blocks, num_finished_blocks,
+                            memory_order_release);
+
+  if (blocking)
+      while (atomic_load_explicit(&htab->num_moved_blocks,
+                                  memory_order_acquire) != num_old_blocks);
+}
+
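+/* Run by the thread that won the race to start the resize, with
+   resize_rwl held for writing.  Allocates the new table, moves through
+   the MOVING_DATA and CLEANING states, and waits for all registered
+   workers to deregister before freeing the old table.  */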
+static void
+resize_master(NAME *htab)
+{
+  htab->old_size = htab->size;
+  htab->old_table = htab->table;
+
+  htab->size = next_prime(htab->size * 2);
+  htab->table = malloc((1 + htab->size) * sizeof(htab->table[0]));
+  assert(htab->table);
+
+  /* Change state from ALLOCATING_MEMORY to MOVING_DATA */
+  atomic_fetch_xor_explicit(&htab->resizing_state,
+                            ALLOCATING_MEMORY ^ MOVING_DATA,
+                            memory_order_release);
+
+  resize_helper(htab, 1);
+
+  /* Change state from MOVING_DATA to CLEANING */
+  size_t resize_state = atomic_fetch_xor_explicit(&htab->resizing_state,
+                                                  MOVING_DATA ^ CLEANING,
+                                                  memory_order_acq_rel);
+  while (GET_ACTIVE_WORKERS(resize_state) != 0)
+      resize_state = atomic_load_explicit(&htab->resizing_state,
+                                          memory_order_acquire);
+
+  /* There are no more active workers */
+  atomic_store_explicit(&htab->next_init_block, 0, memory_order_relaxed);
+  atomic_store_explicit(&htab->num_initialized_blocks, 0,
+                        memory_order_relaxed);
+
+  atomic_store_explicit(&htab->next_move_block, 0, memory_order_relaxed);
+  atomic_store_explicit(&htab->num_moved_blocks, 0, memory_order_relaxed);
+
+  free(htab->old_table);
+
+  /* Change state to NO_RESIZING */
+  atomic_fetch_xor_explicit(&htab->resizing_state, CLEANING ^ NO_RESIZING,
+                            memory_order_relaxed);
+
+}
+
+static void
+resize_worker(NAME *htab)
+{
+  size_t resize_state = atomic_load_explicit(&htab->resizing_state,
+                                              memory_order_acquire);
+
+  /* If the resize has finished */
+  if (IS_NO_RESIZE_OR_CLEANING(resize_state))
+      return;
+
+  /* Register as a worker and check if the resize has finished in the
+     meantime.  */
+  resize_state = atomic_fetch_add_explicit(&htab->resizing_state,
+                                            STATE_INCREMENT,
+                                            memory_order_acquire);
+  if (IS_NO_RESIZE_OR_CLEANING(resize_state))
+    {
+      atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
+                                memory_order_relaxed);
+      return;
+    }
+
+  /* Wait while the new table is being allocated. */
+  while (GET_STATE(resize_state) == ALLOCATING_MEMORY)
+      resize_state = atomic_load_explicit(&htab->resizing_state,
+                                          memory_order_acquire);
+
+  /* Check if the resize is done */
+  assert(GET_STATE(resize_state) != NO_RESIZING);
+  if (GET_STATE(resize_state) == CLEANING)
+    {
+      atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
+                                memory_order_relaxed);
+      return;
+    }
+
+  resize_helper(htab, 0);
+
+  /* Deregister worker */
+  atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
+                            memory_order_release);
+}
+
+
+int
+#define INIT(name) _INIT (name)
+#define _INIT(name) \
+  name##_init
+INIT(NAME) (NAME *htab, size_t init_size)
+{
+  /* We need the size to be a prime.  */
+  init_size = next_prime (init_size);
+
+  /* Initialize the data structure.  */
+  htab->size = init_size;
+  atomic_init(&htab->filled, 0);
+  atomic_init(&htab->resizing_state, 0);
+
+  atomic_init(&htab->next_init_block, 0);
+  atomic_init(&htab->num_initialized_blocks, 0);
+
+  atomic_init(&htab->next_move_block, 0);
+  atomic_init(&htab->num_moved_blocks, 0);
+
+  pthread_rwlock_init(&htab->resize_rwl, NULL);
+
+  htab->table = (void *) malloc ((init_size + 1) * sizeof (htab->table[0]));
+  if (htab->table == NULL)
+      return -1;
+
+  for (size_t i = 0; i <= init_size; i++)
+    {
+      atomic_init(&htab->table[i].hashval, (uintptr_t) NULL);
+      atomic_init(&htab->table[i].val_ptr, (uintptr_t) NULL);
+    }
+
+  return 0;
+}
+
+
+int
+#define FREE(name) _FREE (name)
+#define _FREE(name) \
+name##_free
+FREE(NAME) (NAME *htab)
+{
+  pthread_rwlock_destroy(&htab->resize_rwl);
+  free (htab->table);
+  return 0;
+}
+
+
+int
+#define INSERT(name) _INSERT (name)
+#define _INSERT(name) \
+name##_insert
+INSERT(NAME) (NAME *htab, HASHTYPE hval, TYPE data)
+{
+  int incremented = 0;
+
+  /* Make the hash data nonzero, matching FIND below.  */
+  hval = hval ?: 1;
+
+  for(;;)
+    {
+      while (pthread_rwlock_tryrdlock(&htab->resize_rwl) != 0)
+          resize_worker(htab);
+
+      size_t filled;
+      if (!incremented)
+        {
+          filled = atomic_fetch_add_explicit(&htab->filled, 1,
+                                              memory_order_acquire);
+          incremented = 1;
+        }
+      else
+        {
+          filled = atomic_load_explicit(&htab->filled,
+                                        memory_order_acquire);
+        }
+
+      if (100 * filled > 90 * htab->size)
+        {
+          /* Table is filled more than 90%.  Resize the table.  */
+
+          size_t resizing_state = atomic_load_explicit(&htab->resizing_state,
+                                                        memory_order_acquire);
+          if (resizing_state == 0 &&
+              atomic_compare_exchange_strong_explicit(&htab->resizing_state,
+                                                      &resizing_state,
+                                                      ALLOCATING_MEMORY,
+                                                      memory_order_acquire,
+                                                      memory_order_acquire))
+            {
+              /* Master thread */
+              pthread_rwlock_unlock(&htab->resize_rwl);
+
+              pthread_rwlock_wrlock(&htab->resize_rwl);
+              resize_master(htab);
+              pthread_rwlock_unlock(&htab->resize_rwl);
+
+            }
+          else
+            {
+              /* Worker thread */
+              pthread_rwlock_unlock(&htab->resize_rwl);
+              resize_worker(htab);
+            }
+        }
+      else
+        {
+          /* Lock acquired, no need to resize.  */
+          break;
+        }
+    }
+
+  int ret_val = insert_helper(htab, hval, data);
+  if (ret_val == -1)
+      atomic_fetch_sub_explicit(&htab->filled, 1, memory_order_relaxed);
+  pthread_rwlock_unlock(&htab->resize_rwl);
+  return ret_val;
+}
+
+
+
+TYPE
+#define FIND(name) _FIND (name)
+#define _FIND(name) \
+  name##_find
+FIND(NAME) (NAME *htab, HASHTYPE hval, TYPE val)
+{
+  while (pthread_rwlock_tryrdlock(&htab->resize_rwl) != 0)
+      resize_worker(htab);
+
+  size_t idx;
+
+  /* Make the hash data nonzero.  */
+  hval = hval ?: 1;
+  idx = lookup(htab, hval, val);
+
+  if (idx == 0)
+    {
+      pthread_rwlock_unlock(&htab->resize_rwl);
+      return NULL;
+    }
+
+  /* Get a copy before unlocking the lock.  */
+  TYPE ret_val = (TYPE) atomic_load_explicit(&htab->table[idx].val_ptr,
+                                             memory_order_relaxed);
+
+  pthread_rwlock_unlock(&htab->resize_rwl);
+  return ret_val;
+}
+
diff --git a/lib/dynamicsizehash_concurrent.h b/lib/dynamicsizehash_concurrent.h
new file mode 100644
index 00000000..a137cbd0
--- /dev/null
+++ b/lib/dynamicsizehash_concurrent.h
@@ -0,0 +1,118 @@
+/* Copyright (C) 2000-2019 Red Hat, Inc.
+   This file is part of elfutils.
+   Written by Srdan Milakovic <sm108@rice.edu>, 2019.
+   Derived from Ulrich Drepper <drepper@redhat.com>, 2000.
+
+   This file is free software; you can redistribute it and/or modify
+   it under the terms of either
+
+     * the GNU Lesser General Public License as published by the Free
+       Software Foundation; either version 3 of the License, or (at
+       your option) any later version
+
+   or
+
+     * the GNU General Public License as published by the Free
+       Software Foundation; either version 2 of the License, or (at
+       your option) any later version
+
+   or both in parallel, as here.
+
+   elfutils is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received copies of the GNU General Public License and
+   the GNU Lesser General Public License along with this program.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <stddef.h>
+#include <pthread.h>
+#include "atomics.h"
+/* Before including this file the following macros must be defined:
+
+   NAME      name of the hash table structure.
+   TYPE      data type of the hash table entries
+
+   The following macros if present select features:
+
+   HASHTYPE  integer type for hash values, default unsigned long int
+ */
+
+
+
+#ifndef HASHTYPE
+# define HASHTYPE unsigned long int
+#endif
+
+#ifndef RESIZE_BLOCK_SIZE
+# define RESIZE_BLOCK_SIZE 256
+#endif
+
+/* Defined separately.  */
+extern size_t next_prime (size_t seed);
+
+
+/* Table entry type.  */
+#define _DYNHASHCONENTTYPE(name)       \
+  typedef struct name##_ent         \
+  {                                 \
+    _Atomic(HASHTYPE) hashval;      \
+    atomic_uintptr_t val_ptr;       \
+  } name##_ent
+#define DYNHASHENTTYPE(name) _DYNHASHCONENTTYPE (name)
+DYNHASHENTTYPE (NAME);
+
+/* Type of the dynamic hash table data structure.  */
+#define _DYNHASHCONTYPE(name) \
+typedef struct                                     \
+{                                                  \
+  size_t size;                                     \
+  size_t old_size;                                 \
+  atomic_size_t filled;                            \
+  name##_ent *table;                               \
+  name##_ent *old_table;                           \
+  atomic_size_t resizing_state;                    \
+  atomic_size_t next_init_block;                   \
+  atomic_size_t num_initialized_blocks;            \
+  atomic_size_t next_move_block;                   \
+  atomic_size_t num_moved_blocks;                  \
+  pthread_rwlock_t resize_rwl;                     \
+} name
+#define DYNHASHTYPE(name) _DYNHASHCONTYPE (name)
+DYNHASHTYPE (NAME);
+
+
+
+#define _FUNCTIONS(name)                                            \
+/* Initialize the hash table.  */                                   \
+extern int name##_init (name *htab, size_t init_size);              \
+                                                                    \
+/* Free resources allocated for hash table.  */                     \
+extern int name##_free (name *htab);                                \
+                                                                    \
+/* Insert new entry.  */                                            \
+extern int name##_insert (name *htab, HASHTYPE hval, TYPE data);    \
+                                                                    \
+/* Find entry in hash table.  */                                    \
+extern TYPE name##_find (name *htab, HASHTYPE hval, TYPE val);
+#define FUNCTIONS(name) _FUNCTIONS (name)
+FUNCTIONS (NAME)
+
+
+#ifndef NO_UNDEF
+# undef DYNHASHENTTYPE
+# undef DYNHASHTYPE
+# undef FUNCTIONS
+# undef _FUNCTIONS
+# undef XFUNCTIONS
+# undef _XFUNCTIONS
+# undef NAME
+# undef TYPE
+# undef ITERATE
+# undef COMPARE
+# undef FIRST
+# undef NEXT
+#endif
diff --git a/libdw/ChangeLog b/libdw/ChangeLog
index ec809070..25616277 100644
--- a/libdw/ChangeLog
+++ b/libdw/ChangeLog
@@ -1,3 +1,7 @@
+2019-08-26  Srđan Milaković  <sm108@rice.edu>
+
+	* dwarf_abbrev_hash.{c,h}: Use the *_concurrent hash table.
+
 2019-08-26  Jonathon Anderson  <jma14@rice.edu>
 
 	* libdw_alloc.c (__libdw_allocate): Added thread-safe stack allocator.
diff --git a/libdw/dwarf_abbrev_hash.c b/libdw/dwarf_abbrev_hash.c
index f52f5ad5..c2548140 100644
--- a/libdw/dwarf_abbrev_hash.c
+++ b/libdw/dwarf_abbrev_hash.c
@@ -38,7 +38,7 @@
 #define next_prime __libdwarf_next_prime
 extern size_t next_prime (size_t) attribute_hidden;
 
-#include <dynamicsizehash.c>
+#include <dynamicsizehash_concurrent.c>
 
 #undef next_prime
 #define next_prime attribute_hidden __libdwarf_next_prime
diff --git a/libdw/dwarf_abbrev_hash.h b/libdw/dwarf_abbrev_hash.h
index d2f02ccc..bc3d62c7 100644
--- a/libdw/dwarf_abbrev_hash.h
+++ b/libdw/dwarf_abbrev_hash.h
@@ -34,6 +34,6 @@
 #define TYPE Dwarf_Abbrev *
 #define COMPARE(a, b) (0)
 
-#include <dynamicsizehash.h>
+#include <dynamicsizehash_concurrent.h>
 
 #endif	/* dwarf_abbrev_hash.h */
-- 
2.18.1

