* runtime/dyninst/tls_data.c: New file.
* runtime/stat.c (struct _Stat): Add a tls_data_container_t structure.
(_stp_stat_tls_object_init): New function.
(_stp_stat_tls_object_free): Ditto.
(_stp_stat_init): Instead of directly allocating percpu data, for
dyninst set up tls data to be created when accessed by calling
_stp_tls_data_container_init().
(_stp_stat_del): For dyninst, call _stp_tls_data_container_cleanup() to
remove all the tls data.
(_stp_stat_add): For dyninst, get the proper tls stat object.
(_stp_stat_get_cpu): Deleted unused function.
(_stp_stat_get): For dyninst, get the proper tls stat objects.
(_stp_stat_clear): For dyninst, clear the stat in each thread's tls data.
* runtime/stat.h (struct stat_data): Add a tls_data_object_t structure.
* runtime/map.c (_stp_map_tls_object_init): New function.
(_stp_map_tls_object_free): Ditto.
(_stp_pmap_new): Instead of directly allocating percpu data, for dyninst
set up tls data to be created when accessed by calling
_stp_tls_data_container_init().
(_stp_pmap_clear): For dyninst, clear the map in each thread's tls data.
(_stp_pmap_del): For dyninst, call _stp_tls_data_container_cleanup() to
remove all the tls data.
(_stp_pmap_agg): Add dyninst support.
* runtime/map.h (struct map_root): Add a tls_data_object_t structure.
(struct pmap): Add a tls_data_container_t structure.
* runtime/map-stat.c (_stp_hstat_tls_object_init): New function.
(_stp_pmap_new_hstat_linear): For dyninst, override the standard tls
data object init function with _stp_hstat_tls_object_init(), which knows
how to handle hstats.
(_stp_pmap_new_hstat_log): Ditto.
* runtime/pmap-gen.c (_stp_pmap_tls_object_init): New function.
(_stp_pmap_new): For dyninst, override the standard tls
data object init function with _stp_pmap_tls_object_init(), which knows
how to handle pmaps.
(_stp_pmap_set): For dyninst, get the proper tls pmap object.
(_stp_pmap_add): Ditto.
(_stp_pmap_get_cpu): Ditto.
(_stp_pmap_get): Ditto.
(_stp_pmap_del): Ditto.
* runtime/dyninst/linux_defs.h: Added container_of(), list_entry(),
list_for_each_entry(), and list_for_each_entry_safe().
#ifndef _STAPDYN_LINUX_DEFS_H_
#define _STAPDYN_LINUX_DEFS_H_
+#include <stddef.h>
#include <unistd.h>
#include "linux_hash.h"
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
+/* Cast a member of a structure out to the containing structure;
+ * same definition as the Linux kernel's container_of(). */
+#define container_of(ptr, type, member) ({ \
+	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+	(type *)( (char *)__mptr - offsetof(type,member) );})
+
#define __must_be_array(arr) 0
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
return head->next == head;
}
+/* Get the struct for this list entry (kernel-compatible). */
+#define list_entry(ptr, type, member) \
+	container_of(ptr, type, member)
+
#define list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, n = pos->next)
+/* Iterate over a list of typed entries (kernel-compatible).  NOT
+ * safe against removal of the current entry; use
+ * list_for_each_entry_safe() for that. */
+#define list_for_each_entry(pos, head, member) \
+	for (pos = list_entry((head)->next, typeof(*pos), member); \
+	     &pos->member != (head); \
+	     pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/* Iterate over a list of typed entries, safe against removal of the
+ * current entry, which is prefetched into 'n' (kernel-compatible). */
+#define list_for_each_entry_safe(pos, n, head, member) \
+	for (pos = list_entry((head)->next, typeof(*pos), member), \
+	     n = list_entry(pos->member.next, typeof(*pos), member); \
+	     &pos->member != (head); \
+	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
h->next = NULL;
--- /dev/null
+/* -*- linux-c -*-
+ * TLS Data Functions
+ * Copyright (C) 2012 Red Hat Inc.
+ *
+ * This file is part of systemtap, and is free software. You can
+ * redistribute it and/or modify it under the terms of the GNU General
+ * Public License (GPL); either version 2, or (at your option) any
+ * later version.
+ */
+
+#ifndef _TLS_DATA_C_
+#define _TLS_DATA_C_
+
+#include <pthread.h>
+#include <errno.h>
+
+struct tls_data_object_t;
+
+/* Bookkeeping for one logical per-thread datum: the pthread key used
+ * to look up the calling thread's object, the allocation size of a
+ * new object, a mutex-protected list of every thread's object (for
+ * iterating across threads), and optional init/free hooks. */
+struct tls_data_container_t {
+	pthread_key_t key;		/* key indexing TLS objects */
+	size_t size;			/* allocated size of a new TLS object */
+	struct list_head head;		/* list of tls_data_object_t structs */
+	pthread_mutex_t lock;		/* lock protecting list */
+	int (*init_function)(struct tls_data_object_t *);
+	void (*free_function)(struct tls_data_object_t *);
+};
+
+/* Embedded in each per-thread object.  Users must place this struct
+ * FIRST in the containing structure: the TLS destructor calls free()
+ * on a pointer to it (see _stp_tls_free_per_thread_ptr()). */
+struct tls_data_object_t {
+	struct list_head list;
+	struct tls_data_container_t *container;
+};
+
+#define TLS_DATA_CONTAINER_LOCK(con) pthread_mutex_lock(&(con)->lock)
+#define TLS_DATA_CONTAINER_UNLOCK(con) pthread_mutex_unlock(&(con)->lock)
+
+#define for_each_tls_data(obj, container) \
+ list_for_each_entry((obj), &(container)->head, list)
+
+#define for_each_tls_data_safe(obj, n, container) \
+ list_for_each_entry_safe((obj), (n), &(container)->head, list)
+
+/* pthread TLS destructor, registered via pthread_key_create(): runs
+ * when a thread exits with a non-NULL value stored under the
+ * container's key.  Unlinks the object from the container's list,
+ * runs the client free hook, then frees the object. */
+static void _stp_tls_free_per_thread_ptr(void *addr)
+{
+	struct tls_data_object_t *obj = addr;
+
+	if (obj != NULL) {
+		struct tls_data_container_t *container = obj->container;
+
+		/* Remove this object from the container's list of objects */
+		if (container) {
+			pthread_mutex_lock(&container->lock);
+			list_del(&obj->list);
+
+			/* Give the code above us a chance to cleanup. */
+			if (container->free_function)
+				container->free_function(obj);
+		}
+
+		/* Note that this free() call only works correctly if
+		 * the struct tls_data_object is the first thing in
+		 * its containing structure. */
+		free(obj);
+
+		if (container)
+			pthread_mutex_unlock(&container->lock);
+	}
+}
+
+/* Return the calling thread's TLS object for this container,
+ * lazily allocating, initializing, and registering one on first use.
+ * Returns NULL on allocation/initialization/setspecific failure. */
+static struct tls_data_object_t *
+_stp_tls_get_per_thread_ptr(struct tls_data_container_t *container)
+{
+	/* See if we've already got an object for this thread. */
+	struct tls_data_object_t *obj = pthread_getspecific(container->key);
+
+	/* If we haven't set up a tls object instance for the key for
+	 * this thread yet, allocate one. */
+	if (obj == NULL) {
+		int rc;
+
+		/* The real alloc_percpu() allocates zero-filled
+		 * memory, so we need to do the same. */
+		obj = calloc(container->size, 1);
+		if (obj == NULL) {
+			_stp_error("Couldn't allocate tls object memory: %d\n",
+				   errno);
+			goto exit;
+		}
+
+		/* Give the code above us a chance to initialize the
+		 * newly created object. */
+		obj->container = container;
+		if (container->init_function) {
+			if (container->init_function(obj) != 0) {
+				free(obj);
+				obj = NULL;
+				goto exit;
+			}
+		}
+
+		/* Inform pthreads about this instance. */
+		pthread_mutex_lock(&container->lock);
+		if ((rc = pthread_setspecific(container->key, obj)) == 0) {
+			/* Add obj to container's list of objs (for
+			 * use in looping over all threads). */
+			list_add(&obj->list, &container->head);
+		}
+		else {
+			_stp_error("Couldn't setspecific on tls key: %d\n",
+				   rc);
+
+			/* Give the code above us a chance to cleanup. */
+			if (container->free_function)
+				container->free_function(obj);
+
+			free(obj);
+			obj = NULL;
+		}
+		pthread_mutex_unlock(&container->lock);
+	}
+exit:
+	return obj;
+}
+
+/* Replace a container's init/free hooks after
+ * _stp_tls_data_container_init().  Used by map-stat.c and pmap-gen.c
+ * to install type-specific callbacks over the generic ones. */
+static void
+_stp_tls_data_container_update(struct tls_data_container_t *container,
+			       int (*init_function)(struct tls_data_object_t *),
+			       void (*free_function)(struct tls_data_object_t *))
+{
+	container->init_function = init_function;
+	container->free_function = free_function;
+}
+
+/* Set up a TLS data container.  Per-thread objects of 'size' bytes
+ * are created lazily by _stp_tls_get_per_thread_ptr(); the optional
+ * init_function/free_function hooks run on each object's creation
+ * and destruction.  Returns 0 on success, 1 on failure. */
+static int
+_stp_tls_data_container_init(struct tls_data_container_t *container,
+			     size_t size,
+			     int (*init_function)(struct tls_data_object_t *),
+			     void (*free_function)(struct tls_data_object_t *))
+{
+	int rc;
+
+	INIT_LIST_HEAD(&container->head);
+	if ((rc = pthread_mutex_init(&container->lock, NULL)) != 0) {
+		_stp_error("Couldn't init tls mutex: %d\n", rc);
+		return 1;
+	}
+	if ((rc = pthread_key_create(&container->key,
+				     &_stp_tls_free_per_thread_ptr)) != 0) {
+		_stp_error("Couldn't create tls key: %d\n", rc);
+		(void)pthread_mutex_destroy(&container->lock);
+		return 1;
+	}
+
+	container->size = size;
+	container->init_function = init_function;
+	container->free_function = free_function;
+	return 0;
+}
+
+/* Tear down a container: delete the pthread key first (so the
+ * per-thread destructor no longer fires for exiting threads), then
+ * free every thread's object and destroy the list mutex. */
+static void
+_stp_tls_data_container_cleanup(struct tls_data_container_t *container)
+{
+	struct tls_data_object_t *obj, *n;
+
+	TLS_DATA_CONTAINER_LOCK(container);
+	(void) pthread_key_delete(container->key);
+	for_each_tls_data_safe(obj, n, container) {
+		list_del(&obj->list);
+
+		/* Give the code above us a chance to cleanup. */
+		if (container->free_function)
+			container->free_function(obj);
+
+		free(obj);
+	}
+	TLS_DATA_CONTAINER_UNLOCK(container);
+	(void)pthread_mutex_destroy(&container->lock);
+}
+#endif /* _TLS_DATA_C_ */
/* -*- linux-c -*-
* map functions to handle statistics
- * Copyright (C) 2005 Red Hat Inc.
+ * Copyright (C) 2005, 2012 Red Hat Inc.
*
* This file is part of systemtap, and is free software. You can
* redistribute it and/or modify it under the terms of the GNU General
return m;
}
+#ifndef __KERNEL__
+static int _stp_map_tls_object_init(struct tls_data_object_t *obj);
+static void _stp_map_tls_object_free(struct tls_data_object_t *obj);
+
+/* TLS init hook for pmaps containing hstats: performs the generic
+ * per-thread map init, then copies the histogram parameters from the
+ * pmap's aggregation map into the new per-thread map.  Returns 0 on
+ * success, -1 on failure. */
+static int _stp_hstat_tls_object_init(struct tls_data_object_t *obj)
+{
+	MAP m = container_of(obj, struct map_root, object);
+	PMAP p = container_of(obj->container, struct pmap, container);
+
+	if (_stp_map_tls_object_init(obj) != 0)
+		return -1;
+
+	/* Copy the hist params from the agg. */
+	m->hist.type = p->agg.hist.type;
+	m->hist.start = p->agg.hist.start;
+	m->hist.stop = p->agg.hist.stop;
+	m->hist.interval = p->agg.hist.interval;
+	m->hist.buckets = p->agg.hist.buckets;
+	return 0;
+}
+#endif
+
static PMAP _stp_pmap_new_hstat_linear (unsigned max_entries, int ksize, int start, int stop, int interval)
{
PMAP pmap;
if (pmap) {
int i;
MAP m;
+#ifdef __KERNEL__
for_each_possible_cpu(i) {
m = (MAP)per_cpu_ptr (pmap->map, i);
m->hist.type = HIST_LINEAR;
m->hist.interval = interval;
m->hist.buckets = buckets;
}
- /* now set agg map params */
+#else
+ /* Override the tls data object init function with one
+ * that knows how to handle hstats. */
+ _stp_tls_data_container_update(&pmap->container,
+ &_stp_hstat_tls_object_init,
+ &_stp_map_tls_object_free);
+#endif
+ /* now set agg map params */
m = &pmap->agg;
m->hist.type = HIST_LINEAR;
m->hist.start = start;
if (pmap) {
int i;
MAP m;
+#ifdef __KERNEL__
for_each_possible_cpu(i) {
m = (MAP)per_cpu_ptr (pmap->map, i);
m->hist.type = HIST_LOG;
m->hist.buckets = HIST_LOG_BUCKETS;
}
+#else
+ /* Override the tls data object init function with one
+ * that knows how to handle hstats. */
+ _stp_tls_data_container_update(&pmap->container,
+ &_stp_hstat_tls_object_init,
+ &_stp_map_tls_object_free);
+#endif
/* now set agg map params */
m = &pmap->agg;
m->hist.type = HIST_LOG;
/* -*- linux-c -*-
* Map Functions
- * Copyright (C) 2005-2009 Red Hat Inc.
+ * Copyright (C) 2005-2009, 2012 Red Hat Inc.
*
* This file is part of systemtap, and is free software. You can
* redistribute it and/or modify it under the terms of the GNU General
#include "stat-common.c"
#include "map-stat.c"
+#if NEED_MAP_LOCKS
+#ifdef __KERNEL__
+#define MAP_LOCK(m) spin_lock(&(m)->lock)
+#define MAP_UNLOCK(m) spin_unlock(&(m)->lock)
+#else
+#define MAP_LOCK(m) pthread_mutex_lock(&(m)->lock)
+#define MAP_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
+#endif
+#else
+#define MAP_LOCK(m) do {} while (0)
+#define MAP_UNLOCK(m) do {} while (0)
+#endif
+
static int map_sizes[] = {
sizeof(int64_t),
MAP_STRING_LENGTH,
else
tmp = _stp_kmalloc_node_gfp(size, cpu_to_node(cpu), STP_ALLOC_SLEEP_FLAGS);
- if (!tmp)
+ if (!tmp) {
+ _stp_error("error allocating map entry\n");
return -1;
+ }
// dbug ("allocated %lx\n", (long)tmp);
list_add((struct list_head *)tmp, &m->pool);
return m;
}
+#ifndef __KERNEL__
+/* TLS init hook for pmaps: initializes a newly allocated
+ * (zero-filled) per-thread map_root.  The map parameters come from
+ * the values cached in the owning pmap by _stp_pmap_new().  Returns
+ * 0 on success, -1 on failure (the TLS layer then frees the object). */
+static int _stp_map_tls_object_init(struct tls_data_object_t *obj)
+{
+	MAP m = container_of(obj, struct map_root, object);
+	PMAP p = container_of(obj->container, struct pmap, container);
+
+	INIT_LIST_HEAD(&m->pool);
+	INIT_LIST_HEAD(&m->head);
+	m->hashes = NULL;
+
+#if NEED_MAP_LOCKS
+	{
+		int rc;
+		if ((rc = pthread_mutex_init(&m->lock, NULL)) != 0) {
+			_stp_error("Couldn't initialize map mutex: %d\n", rc);
+			return -1;
+		}
+	}
+#endif
+
+	/* To get the correct parameters for _stp_map_init(), get them
+	 * from the cached values in PMAP. */
+	if (_stp_map_init(m, p->max_entries, p->type, p->key_size,
+			  p->data_size, -1) != 0) {
+		__stp_map_del(m);
+#if NEED_MAP_LOCKS
+		(void)pthread_mutex_destroy(&m->lock);
+#endif
+		return -1;
+	}
+
+	return 0;
+}
+
+/* TLS free hook for pmaps: releases a per-thread map's element
+ * storage (and destroys its mutex when map locking is enabled). */
+static void _stp_map_tls_object_free(struct tls_data_object_t *obj)
+{
+	MAP m = container_of(obj, struct map_root, object);
+	__stp_map_del(m);
+#if NEED_MAP_LOCKS
+	(void)pthread_mutex_destroy(&m->lock);
+#endif
+}
+#endif
+
static PMAP _stp_pmap_new(unsigned max_entries, int type, int key_size, int data_size)
{
int i;
if (pmap == NULL)
return NULL;
+#ifdef __KERNEL__
pmap->map = map = (MAP) _stp_alloc_percpu (sizeof(struct map_root));
if (map == NULL)
goto err;
+#else
+ if (_stp_tls_data_container_init(&pmap->container,
+ sizeof(struct map_root),
+ &_stp_map_tls_object_init,
+ &_stp_map_tls_object_free) != 0)
+ goto err;
+#endif
+#ifdef __KERNEL__
/* initialize the memory lists first so if allocations fail */
/* at some point, it is easy to clean up. */
for_each_possible_cpu(i) {
INIT_LIST_HEAD(&m->pool);
INIT_LIST_HEAD(&m->head);
}
+#endif
INIT_LIST_HEAD(&pmap->agg.pool);
INIT_LIST_HEAD(&pmap->agg.head);
+#ifdef __KERNEL__
for_each_possible_cpu(i) {
m = per_cpu_ptr (map, i);
if (_stp_map_init(m, max_entries, type, key_size, data_size, i)) {
goto err1;
}
}
+#else
+ /* Cache values for use by _stp_map_tls_object_init(). */
+ pmap->max_entries = max_entries;
+ pmap->type = type;
+ pmap->key_size = key_size;
+ pmap->data_size = data_size;
+#endif
if (_stp_map_init(&pmap->agg, max_entries, type, key_size, data_size, -1))
goto err1;
return pmap;
err1:
+#ifdef __KERNEL__
for_each_possible_cpu(i) {
m = per_cpu_ptr (map, i);
__stp_map_del(m);
}
_stp_free_percpu(map);
+#else
+ _stp_tls_data_container_cleanup(&pmap->container);
+#endif
err:
_stp_kfree(pmap);
return NULL;
if (pmap == NULL)
return;
+#ifdef __KERNEL__
for_each_possible_cpu(i) {
MAP m = per_cpu_ptr (pmap->map, i);
-#if NEED_MAP_LOCKS
- spin_lock(&m->lock);
-#endif
+
+ MAP_LOCK(m);
_stp_map_clear(m);
-#if NEED_MAP_LOCKS
- spin_unlock(&m->lock);
-#endif
+ MAP_UNLOCK(m);
+ }
+#else
+ {
+ struct tls_data_object_t *obj;
+ TLS_DATA_CONTAINER_LOCK(&pmap->container);
+ for_each_tls_data(obj, &pmap->container) {
+ MAP m = container_of(obj, struct map_root, object);
+
+ MAP_LOCK(m);
+ _stp_map_clear(m);
+ MAP_UNLOCK(m);
+ }
+ TLS_DATA_CONTAINER_UNLOCK(&pmap->container);
}
+#endif
_stp_map_clear(&pmap->agg);
}
if (pmap == NULL)
return;
+#ifdef __KERNEL__
for_each_possible_cpu(i) {
MAP m = per_cpu_ptr (pmap->map, i);
__stp_map_del(m);
}
_stp_free_percpu(pmap->map);
+#else
+ _stp_tls_data_container_cleanup(&pmap->container);
+#endif
/* free agg map elements */
__stp_map_del(&pmap->agg);
struct map_node *ptr, *aptr = NULL;
struct hlist_head *head, *ahead;
struct hlist_node *e, *f;
+#ifndef __KERNEL__
+ struct tls_data_object_t *obj;
+#endif
agg = &pmap->agg;
/* every time we aggregate. which would be best? */
_stp_map_clear (agg);
+#ifdef __KERNEL__
for_each_possible_cpu(i) {
m = per_cpu_ptr (pmap->map, i);
-#if NEED_MAP_LOCKS
- spin_lock(&m->lock);
+#else
+ TLS_DATA_CONTAINER_LOCK(&pmap->container);
+ for_each_tls_data(obj, &pmap->container) {
+ m = container_of(obj, struct map_root, object);
#endif
+ MAP_LOCK(m);
/* walk the hash chains. */
for (hash = 0; hash < HASH_TABLE_SIZE; hash++) {
head = &m->hashes[hash];
_stp_add_agg(aptr, ptr);
else {
if (!_stp_new_agg(agg, ahead, ptr)) {
-#if NEED_MAP_LOCKS
- spin_unlock(&m->lock);
-#endif
+ MAP_UNLOCK(m);
return NULL;
}
}
}
}
-#if NEED_MAP_LOCKS
- spin_unlock(&m->lock);
-#endif
+ MAP_UNLOCK(m);
}
+#ifndef __KERNEL__
+ TLS_DATA_CONTAINER_UNLOCK(&pmap->container);
+#endif
return agg;
}
{
int i, num = 0;
+#ifdef __KERNEL__
for_each_possible_cpu(i) {
MAP m = per_cpu_ptr (pmap->map, i);
num += m->num;
}
+#else
+ struct tls_data_object_t *obj;
+ TLS_DATA_CONTAINER_LOCK(&pmap->container);
+ for_each_tls_data(obj, &pmap->container) {
+ MAP m = container_of(obj, struct map_root, object);
+ num += m->num;
+ }
+ TLS_DATA_CONTAINER_UNLOCK(&pmap->container);
+#endif
return num;
}
#endif /* _MAP_C_ */
/* -*- linux-c -*-
* Map Header File
- * Copyright (C) 2005 Red Hat Inc.
+ * Copyright (C) 2005, 2012 Red Hat Inc.
*
* This file is part of systemtap, and is free software. You can
* redistribute it and/or modify it under the terms of the GNU General
#include <linux/log2.h>
#elif defined(__DYNINST__)
#include "dyninst/ilog2.h"
+#include "dyninst/tls_data.c"
#endif
/** @file map.h
* It is allocated once when _stp_map_new() is called.
*/
struct map_root {
+#ifndef __KERNEL__
+ /* Note that the tls_data_object_t must be first in struct
+ * map_root. */
+ struct tls_data_object_t object;
+#endif
/* type of the value stored in the array */
int type;
typedef struct map_root *MAP;
struct pmap {
+#ifdef __KERNEL__
MAP map; /* per-cpu maps */
+#else
+ struct tls_data_container_t container;
+
+ /* Cached _stp_map_init() values. */
+ unsigned max_entries;
+ int type;
+ int key_size;
+ int data_size;
+#endif
struct map_root agg; /* aggregation map */
};
typedef struct pmap *PMAP;
/* -*- linux-c -*-
* pmap API generator
- * Copyright (C) 2005-2008 Red Hat Inc.
+ * Copyright (C) 2005-2008, 2012 Red Hat Inc.
*
* This file is part of systemtap, and is free software. You can
* redistribute it and/or modify it under the terms of the GNU General
return (unsigned int) (hash % HASH_TABLE_SIZE);
}
+#ifndef __KERNEL__
+static int _stp_map_tls_object_init(struct tls_data_object_t *obj);
+static void _stp_map_tls_object_free(struct tls_data_object_t *obj);
+
+/* TLS init hook for this pmap instantiation: performs the generic
+ * per-thread map init, then installs the key-type-specific callbacks
+ * generated for this KEYSYM.  Returns 0 on success, -1 on failure. */
+static int KEYSYM(_stp_pmap_tls_object_init)(struct tls_data_object_t *obj)
+{
+	MAP m = container_of(obj, struct map_root, object);
+
+	if (_stp_map_tls_object_init(obj) != 0)
+		return -1;
+
+	m->get_key = KEYSYM(pmap_get_key);
+	m->copy = KEYSYM(pmap_copy_keys);
+	m->cmp = KEYSYM(pmap_key_cmp);
+	return 0;
+}
+#endif
#if VALUE_TYPE == INT64 || VALUE_TYPE == STRING
static PMAP KEYSYM(_stp_pmap_new) (unsigned max_entries)
if (pmap) {
int i;
MAP m;
+#ifdef __KERNEL__
for_each_possible_cpu(i) {
m = per_cpu_ptr (pmap->map, i);
m->get_key = KEYSYM(pmap_get_key);
spin_lock_init(m->lock);
#endif
}
+#else
+ /* Override the tls data object init function with one
+ * that knows how to handle pmaps. */
+ _stp_tls_data_container_update(&pmap->container,
+ &KEYSYM(_stp_pmap_tls_object_init),
+ &_stp_map_tls_object_free);
+#endif
m = &pmap->agg;
m->get_key = KEYSYM(pmap_get_key);
m->copy = KEYSYM(pmap_copy_keys);
static int KEYSYM(_stp_pmap_set) (PMAP pmap, ALLKEYSD(key), VSTYPE val)
{
int res;
+#ifdef __KERNEL__
MAP m = per_cpu_ptr (pmap->map, MAP_GET_CPU ());
+#else
+ struct tls_data_object_t *obj;
+ MAP m;
+
+ obj = _stp_tls_get_per_thread_ptr(&pmap->container);
+ if (!obj)
+ return -ENOMEM;
+ m = container_of(obj, struct map_root, object);
+#endif
#if NEED_MAP_LOCKS
if (!spin_trylock(&m->lock))
return -3;
static int KEYSYM(_stp_pmap_add) (PMAP pmap, ALLKEYSD(key), VSTYPE val)
{
int res;
- MAP m = per_cpu_ptr (pmap->map, MAP_GET_CPU());
+#ifdef __KERNEL__
+ MAP m = per_cpu_ptr (pmap->map, MAP_GET_CPU ());
+#else
+ struct tls_data_object_t *obj;
+ MAP m;
+
+ obj = _stp_tls_get_per_thread_ptr(&pmap->container);
+ if (!obj)
+ return -ENOMEM;
+ m = container_of(obj, struct map_root, object);
+#endif
#if NEED_MAP_LOCKS
if (!spin_trylock(&m->lock))
return -3;
struct KEYSYM(pmap_node) *n;
VALTYPE res;
MAP map;
+#ifndef __KERNEL__
+ struct tls_data_object_t *obj;
+#endif
if (pmap == NULL)
return NULLRET;
+#ifdef __KERNEL__
map = per_cpu_ptr (pmap->map, MAP_GET_CPU ());
+#else
+ obj = _stp_tls_get_per_thread_ptr(&pmap->container);
+ if (!obj)
+ return NULLRET;
+ map = container_of(obj, struct map_root, object);
+#endif
hv = KEYSYM(phash) (ALLKEYS(key));
head = &map->hashes[hv];
struct KEYSYM(pmap_node) *n;
struct map_node *anode = NULL;
MAP map, agg;
+#ifndef __KERNEL__
+ struct tls_data_object_t *obj;
+#endif
if (pmap == NULL)
return NULLRET;
}
/* now total each cpu */
+#ifdef __KERNEL__
for_each_possible_cpu(cpu) {
map = per_cpu_ptr (pmap->map, cpu);
+#else
+ TLS_DATA_CONTAINER_LOCK(&pmap->container);
+ for_each_tls_data(obj, &pmap->container) {
+ map = container_of(obj, struct map_root, object);
+#endif
head = &map->hashes[hv];
#if NEED_MAP_LOCKS
spin_unlock(&map->lock);
#endif
}
+#ifndef __KERNEL__
+ TLS_DATA_CONTAINER_UNLOCK(&pmap->container);
+#endif
if (anode && !clear_agg)
return MAP_GET_VAL(anode);
static int KEYSYM(_stp_pmap_del) (PMAP pmap, ALLKEYSD(key))
{
int res;
+#ifdef __KERNEL__
MAP m = per_cpu_ptr (pmap->map, MAP_GET_CPU ());
+#else
+ struct tls_data_object_t *obj;
+ MAP m;
+
+ obj = _stp_tls_get_per_thread_ptr(&pmap->container);
+ if (!obj)
+ return -ENOMEM;
+ m = container_of(obj, struct map_root, object);
+#endif
#if NEED_MAP_LOCKS
if (!spin_trylock(&m->lock))
return -1;
/* -*- linux-c -*-
* Statistics Aggregation
- * Copyright (C) 2005-2008 Red Hat Inc.
+ * Copyright (C) 2005-2008, 2012 Red Hat Inc.
* Copyright (C) 2006 Intel Corporation
*
* This file is part of systemtap, and is free software. You can
* @{
*/
+#ifndef __KERNEL__
+#include <pthread.h>
+#endif
+
#include "stat-common.c"
/* for the paranoid. */
#if NEED_STAT_LOCKS == 1
+#ifdef __KERNEL__
#define STAT_LOCK(sd) spin_lock(&sd->lock)
#define STAT_UNLOCK(sd) spin_unlock(&sd->lock)
#else
+#define STAT_LOCK(sd) pthread_mutex_lock(&sd->lock)
+#define STAT_UNLOCK(sd) pthread_mutex_unlock(&sd->lock)
+#endif
+#else
#define STAT_LOCK(sd) ;
#define STAT_UNLOCK(sd) ;
#endif
/** Stat struct for stat.c. Maps do not need this */
struct _Stat {
struct _Hist hist;
- /* per-cpu data. allocated with _stp_alloc_percpu() */
+
+ /*
+ * In kernel-mode, the stat data is per-cpu data (allocated
+ * with _stp_alloc_percpu()) stored in 'sd'. In dyninst-mode,
+ * the stat data is thread local storage.
+ */
+#ifdef __KERNEL__
stat_data *sd;
+#else
+ struct tls_data_container_t container;
+#endif
/* aggregated data */
stat_data *agg;
};
typedef struct _Stat *Stat;
+#ifndef __KERNEL__
+#if NEED_STAT_LOCKS == 1
+/* TLS init hook for stats (compiled only when NEED_STAT_LOCKS == 1):
+ * initializes the per-thread stat_data's mutex.  Returns 0 on
+ * success, -1 on failure. */
+static int _stp_stat_tls_object_init(struct tls_data_object_t *obj)
+{
+	stat_data *sd = container_of(obj, stat_data, object);
+
+	int rc;
+	if ((rc = pthread_mutex_init(&sd->lock, NULL)) != 0) {
+		_stp_error("Couldn't initialize stat mutex: %d\n", rc);
+		return -1;
+	}
+	return 0;
+}
+
+/* TLS free hook for stats: destroys the per-thread stat_data's mutex
+ * before the TLS layer frees the object. */
+static void _stp_stat_tls_object_free(struct tls_data_object_t *obj)
+{
+	stat_data *sd = container_of(obj, stat_data, object);
+	(void)pthread_mutex_destroy(&sd->lock);
+}
+#endif /* NEED_STAT_LOCKS == 1 */
+#endif /* !__KERNEL__ */
/** Initialize a Stat.
* Call this during probe initialization to create a Stat.
static Stat _stp_stat_init (int type, ...)
{
int size, buckets=0, start=0, stop=0, interval=0;
+#ifdef __KERNEL__
stat_data *sd, *agg;
+#else
+ stat_data *agg;
+#endif
Stat st;
if (type != HIST_NONE) {
return NULL;
size = buckets * sizeof(int64_t) + sizeof(stat_data);
+#ifdef __KERNEL__
sd = (stat_data *) _stp_alloc_percpu (size);
if (sd == NULL)
goto exit1;
+ st->sd = sd;
#if NEED_STAT_LOCKS == 1
{
spin_lock_init(sdp->lock);
}
}
-#endif
+#endif /* NEED_STAT_LOCKS == 1 */
+
+#else /* !__KERNEL__ */
+
+#if NEED_STAT_LOCKS == 1
+ if (_stp_tls_data_container_init(&st->container, size,
+ &_stp_stat_tls_object_init,
+ &_stp_stat_tls_object_free) != 0)
+#else /* NEED_STAT_LOCKS !=1 */
+ if (_stp_tls_data_container_init(&st->container, size,
+ NULL, NULL) != 0)
+#endif /* NEED_STAT_LOCKS != 1 */
+ goto exit1;
+#endif /* !__KERNEL__ */
agg = (stat_data *)_stp_kmalloc_gfp(size, STP_ALLOC_SLEEP_FLAGS);
if (agg == NULL)
st->hist.stop = stop;
st->hist.interval = interval;
st->hist.buckets = buckets;
- st->sd = sd;
st->agg = agg;
return st;
exit2:
+#ifdef __KERNEL__
_stp_kfree (sd);
+#else
+ _stp_tls_data_container_cleanup(&st->container);
+#endif
exit1:
_stp_kfree (st);
return NULL;
static void _stp_stat_del (Stat st)
{
if (st) {
+#ifdef __KERNEL__
_stp_free_percpu (st->sd);
+#else /* !__KERNEL__ */
+ _stp_tls_data_container_cleanup(&st->container);
+#endif /* !__KERNEL__ */
_stp_kfree (st->agg);
_stp_kfree (st);
}
*/
static void _stp_stat_add (Stat st, int64_t val)
{
+#ifdef __KERNEL__
stat_data *sd = per_cpu_ptr (st->sd, get_cpu());
+#else
+ struct tls_data_object_t *obj;
+ stat_data *sd;
+
+ obj = _stp_tls_get_per_thread_ptr(&st->container);
+ if (!obj)
+ return;
+ sd = container_of(obj, stat_data, object);
+#endif
STAT_LOCK(sd);
__stp_stat_add (&st->hist, sd, val);
STAT_UNLOCK(sd);
put_cpu();
}
-/** Get per-cpu Stats.
- * Gets the Stats for a specific CPU.
- *
- * If NEED_STAT_LOCKS is set, you MUST call STAT_UNLOCK()
- * when you are finished with the returned pointer.
- *
- * @param st Stat
- * @param cpu CPU number
- * @returns A pointer to a stat.
- */
-static stat_data *_stp_stat_get_cpu (Stat st, int cpu)
-{
- stat_data *sd = per_cpu_ptr (st->sd, cpu);
- STAT_LOCK(sd);
- return sd;
-}
static void _stp_stat_clear_data (Stat st, stat_data *sd)
{
{
int i, j;
stat_data *agg = st->agg;
+ stat_data *sd;
+#ifndef __KERNEL__
+ struct tls_data_object_t *obj;
+#endif
STAT_LOCK(agg);
_stp_stat_clear_data (st, agg);
+#ifdef __KERNEL__
for_each_possible_cpu(i) {
- stat_data *sd = per_cpu_ptr (st->sd, i);
+ sd = per_cpu_ptr (st->sd, i);
+#else
+ TLS_DATA_CONTAINER_LOCK(&st->container);
+ for_each_tls_data(obj, &st->container) {
+ sd = container_of(obj, stat_data, object);
+#endif
STAT_LOCK(sd);
if (sd->count) {
if (agg->count == 0) {
}
STAT_UNLOCK(sd);
}
+#ifndef __KERNEL__
+ TLS_DATA_CONTAINER_UNLOCK(&st->container);
+#endif
return agg;
}
*/
static void _stp_stat_clear (Stat st)
{
+#ifdef __KERNEL__
int i;
for_each_possible_cpu(i) {
stat_data *sd = per_cpu_ptr (st->sd, i);
+#else
+ struct tls_data_object_t *obj;
+ TLS_DATA_CONTAINER_LOCK(&st->container);
+ for_each_tls_data(obj, &st->container) {
+ stat_data *sd = container_of(obj, stat_data, object);
+#endif
STAT_LOCK(sd);
_stp_stat_clear_data (st, sd);
STAT_UNLOCK(sd);
}
+#ifndef __KERNEL__
+ TLS_DATA_CONTAINER_UNLOCK(&st->container);
+#endif
}
/** @} */
#endif /* _STAT_C_ */
/* -*- linux-c -*-
* Statistics Header
- * Copyright (C) 2005 Red Hat Inc.
+ * Copyright (C) 2005, 2012 Red Hat Inc.
*
* This file is part of systemtap, and is free software. You can
* redistribute it and/or modify it under the terms of the GNU General
#ifndef _STAT_H_
#define _STAT_H_
+#ifndef __KERNEL__
+#include "dyninst/tls_data.c"
+#endif
+
#ifndef NEED_STAT_LOCKS
#define NEED_STAT_LOCKS 0
#endif
/** Statistics are stored in this struct. This is per-cpu or per-node data
and is variable length due to the unknown size of the histogram. */
struct stat_data {
+#ifndef __KERNEL__
+ /* Note that the tls_data_object_t must be first in struct
+ * stat_data. */
+ struct tls_data_object_t object;
+#endif
int64_t count;
int64_t sum;
int64_t min, max;
#if NEED_STAT_LOCKS == 1
+#ifdef __KERNEL__
spinlock_t lock;
+#else /* !__KERNEL__ */
+ pthread_mutex_t lock;
+#endif /* !__KERNEL__ */
#endif
int64_t histogram[];
};