offptr_t omap[]; /* per-cpu maps */
};
-static inline PMAP _stp_pmap_alloc(void)
-{
- return calloc((1 + _stp_runtime_num_contexts), sizeof(offptr_t));
-}
-
static inline MAP _stp_pmap_get_agg(PMAP p)
{
return offptr_get(&p->oagg);
offptr_set(&p->omap[cpu], m);
}
+
+/* Basic (non-freeing) teardown of a single map.
+ * Used for maps that are sub-pieces of a larger allocation
+ * (see _stp_pmap_del), so the map memory itself is not freed here.
+ * NULL-safe. */
+static void __stp_map_del(MAP map)
+{
+	if (map == NULL)
+		return;
+
+	/* The lock is the only thing to clean up; the node memory
+	 * lives inside the map's own single allocation and is released
+	 * when that containing allocation is freed. */
+	_stp_map_destroy_lock(map);
+}
+
+
+/** Deletes a map.
+ * Deletes a map, freeing all memory in all elements.
+ * Normally done only when the module exits.
+ * @param map the map to delete; may be NULL (no-op).
+ */
+
+static void _stp_map_del(MAP map)
+{
+	if (map == NULL)
+		return;
+
+	/* Destroy the lock, then release the single calloc()
+	 * allocation that holds both the map_root and its nodes
+	 * (see _stp_map_new). */
+	__stp_map_del(map);
+	free(map);
+}
+
+/** Deletes a pmap and all of its per-cpu and aggregate maps.
+ * @param pmap the pmap to delete; may be NULL (no-op).
+ */
+static void _stp_pmap_del(PMAP pmap)
+{
+	int i;
+
+	if (pmap == NULL)
+		return;
+
+	/* The pmap is one giant allocation, so do only
+	 * the basic cleanup for each map. */
+	for_each_possible_cpu(i)
+		__stp_map_del(_stp_pmap_get_map (pmap, i));
+	__stp_map_del(_stp_pmap_get_agg(pmap));
+
+	/* The pmap was obtained with calloc() in _stp_pmap_new, so
+	 * release it with free(), matching _stp_map_del above. */
+	free(pmap);
+}
+
+
+/** Initialize a preallocated map.
+ * Assumes the map_root and its nodes live in one contiguous
+ * allocation, with the node memory immediately after the map_root.
+ * All max_entries nodes start on the free pool.
+ * @returns 0 on success, -1 if the lock cannot be initialized.
+ */
+static int
+_stp_map_init(MAP m, unsigned max_entries, int wrap, int node_size)
+{
+	unsigned i;
+
+	/* The node memory is allocated right after the map itself. */
+	void *node_mem = m + 1;
+
+	INIT_MLIST_HEAD(&m->pool);
+	INIT_MLIST_HEAD(&m->head);
+	for (i = 0; i < HASH_TABLE_SIZE; i++)
+		INIT_MHLIST_HEAD(&m->hashes[i]);
+
+	m->maxnum = max_entries;
+	m->wrap = wrap;
+
+	/* Slice the trailing node memory into node_size pieces and
+	 * thread them all onto the free pool. */
+	for (i = 0; i < max_entries; i++) {
+		struct map_node *node = node_mem + i * node_size;
+		mlist_add(&node->lnode, &m->pool);
+		INIT_MHLIST_NODE(&node->hnode);
+	}
+
+	if (_stp_map_initialize_lock(m) != 0)
+		return -1;
+
+	return 0;
+}
+
+
+/** Create a new map.
+ * Maps must be created at module initialization time.
+ * @param max_entries The maximum number of entries allowed. Currently that
+ * number will be preallocated. If more entries are required, the oldest ones
+ * will be deleted. This makes it effectively a circular buffer.
+ * @param cpu unused in this (stapdyn) runtime; kept for signature
+ * parity with the kernel-runtime variant.
+ * @return A MAP on success or NULL on failure.
+ * @ingroup map_create
+ */
+
+static MAP
+_stp_map_new(unsigned max_entries, int wrap, int node_size,
+	     int cpu __attribute((unused)))
+{
+	MAP m;
+
+	/* NB: Allocate the map in one big chunk.
+	 * (See _stp_pmap_new for more explanation) */
+	size_t map_size = sizeof(struct map_root) + node_size * max_entries;
+	m = calloc(1, map_size);
+	if (m == NULL)
+		return NULL;
+
+	if (_stp_map_init(m, max_entries, wrap, node_size)) {
+		_stp_map_del(m);
+		return NULL;
+	}
+	return m;
+}
+
+/** Create a new pmap (per-cpu maps plus one aggregate map).
+ * @return A PMAP on success or NULL on failure.
+ */
+static PMAP
+_stp_pmap_new(unsigned max_entries, int wrap, int node_size)
+{
+	int i;
+	MAP m;
+	PMAP pmap;
+	void *map_mem;
+
+	/* Allocate the pmap in one big chunk.
+	 *
+	 * The reason for this is that we're allocating in the shared memory
+	 * mmap, which may have to move locations in order to grow. If some
+	 * smaller unit of the pmap allocation were to cause the whole thing to
+	 * move, then we'd lose track of the prior allocations.
+	 *
+	 * Once returned from here, we'll always access the pmap via the global
+	 * shared memory base. So if other map/pmap/stat/etc. allocations
+	 * cause it to move later, that's ok.
+	 *
+	 * NOTE(review): the paragraph above refers to the shared memory
+	 * mmap, but the allocation below is a plain calloc() — confirm
+	 * which allocator is intended here.
+	 */
+
+	size_t map_size = sizeof(struct map_root) + node_size * max_entries;
+
+	size_t pmap_size = sizeof(struct pmap) +
+		sizeof(offptr_t) * _stp_runtime_num_contexts;
+
+	size_t total_size = pmap_size +
+		map_size * (_stp_runtime_num_contexts + 1);
+
+	map_mem = pmap = calloc(1, total_size);
+	if (pmap == NULL)
+		return NULL;
+	map_mem += pmap_size;
+
+	/* Clear every slot first so a failure below can be cleaned up
+	 * safely by _stp_pmap_del (which is NULL-tolerant). */
+	for_each_possible_cpu(i)
+		_stp_pmap_set_map(pmap, NULL, i);
+	_stp_pmap_set_agg(pmap, NULL);
+
+	/* Initialize the per-cpu maps.
+	 * NOTE(review): assumes for_each_possible_cpu iterates over at
+	 * most _stp_runtime_num_contexts slots in this runtime, since
+	 * that is what was allocated above — confirm. */
+	for_each_possible_cpu(i) {
+		m = map_mem;
+		if (_stp_map_init(m, max_entries, wrap, node_size) != 0)
+			goto err;
+		_stp_pmap_set_map(pmap, m, i);
+		map_mem += map_size;
+	}
+
+	/* Initialize the aggregate map. */
+	m = map_mem;
+	if (_stp_map_init(m, max_entries, wrap, node_size) != 0)
+		goto err;
+	_stp_pmap_set_agg(pmap, m);
+
+	return pmap;
+
+err:
+	_stp_pmap_del(pmap);
+	return NULL;
+}
+
#endif /* _STAPDYN_MAP_RUNTIME_H_ */
if (stat_data_size < sizeof(stat_data))
return NULL;
- /* This is done as one big allocation, then
- * assigning offptrs to each sub-piece. */
+ /* NB: This is done as one big allocation, then assigning offptrs to
+ * each sub-piece. (See _stp_pmap_new for more explanation) */
st = mem = calloc(1, total_size);
if (st == NULL)
return NULL;
MAP map[]; /* per-cpu maps */
};
-static inline PMAP _stp_pmap_alloc(void)
-{
- /* Called from module_init, so user context, may sleep alloc. */
- return _stp_kzalloc_gfp((1 + NR_CPUS) * sizeof(MAP),
- STP_ALLOC_SLEEP_FLAGS);
-}
-
static inline MAP _stp_pmap_get_agg(PMAP p)
{
return p->agg;
p->map[cpu] = m;
}
+
+/** Deletes a map.
+ * Deletes a map, freeing all memory in all elements.
+ * Normally done only when the module exits.
+ * @param map the map to delete; may be NULL (no-op).
+ */
+
+static void _stp_map_del(MAP map)
+{
+	struct mlist_head *p, *tmp;
+
+	if (map == NULL)
+		return;
+
+	/* free unused pool
+	 * NOTE(review): freeing through the mlist_head pointer assumes
+	 * lnode is the first member of struct map_node — confirm. */
+	mlist_for_each_safe(p, tmp, &map->pool) {
+		mlist_del(p);
+		_stp_kfree(p);
+	}
+
+	/* free used list */
+	mlist_for_each_safe(p, tmp, &map->head) {
+		mlist_del(p);
+		_stp_kfree(p);
+	}
+
+	_stp_map_destroy_lock(map);
+
+	_stp_kfree(map);
+}
+
+/** Deletes a pmap, including all per-cpu maps and the aggregate map.
+ * Each map here is an independent allocation, so each gets the full
+ * _stp_map_del treatment (NULL slots are tolerated).
+ * @param pmap the pmap to delete; may be NULL (no-op).
+ */
+static void _stp_pmap_del(PMAP pmap)
+{
+	int i;
+
+	if (pmap == NULL)
+		return;
+
+	for_each_possible_cpu(i) {
+		MAP m = _stp_pmap_get_map (pmap, i);
+		_stp_map_del(m);
+	}
+
+	/* free agg map elements */
+	_stp_map_del(_stp_pmap_get_agg(pmap));
+
+	_stp_kfree(pmap);
+}
+
+
+/** Allocate zeroed memory, optionally preferring a cpu's NUMA node.
+ * @param cpu cpu whose node the memory should come from, or a
+ *            negative value for no node preference.
+ * @returns zeroed memory or NULL on failure.
+ */
+static void*
+_stp_map_kzalloc(size_t size, int cpu)
+{
+	/* Called from module_init, so user context, may sleep alloc. */
+	if (cpu < 0)
+		return _stp_kzalloc_gfp(size, STP_ALLOC_SLEEP_FLAGS);
+	return _stp_kzalloc_node_gfp(size, cpu_to_node(cpu),
+				     STP_ALLOC_SLEEP_FLAGS);
+}
+
+
+/** Initialize an allocated map_root, preallocating its node pool.
+ * Allocates max_entries zeroed nodes (NUMA-local when cpu >= 0) and
+ * threads them onto the free pool, then initializes the lock.
+ * @returns 0 on success, -1 on node-allocation or lock failure.
+ * On failure the pool may be partially filled; the caller is
+ * expected to clean up with _stp_map_del().
+ */
+static int
+_stp_map_init(MAP m, unsigned max_entries, int wrap, int node_size, int cpu)
+{
+	unsigned i;
+
+	INIT_MLIST_HEAD(&m->pool);
+	INIT_MLIST_HEAD(&m->head);
+	for (i = 0; i < HASH_TABLE_SIZE; i++)
+		INIT_MHLIST_HEAD(&m->hashes[i]);
+
+	m->maxnum = max_entries;
+	m->wrap = wrap;
+
+	/* It would be nice to allocate the nodes in one big chunk, but
+	 * sometimes they're big, and there may be a lot of them, so memory
+	 * fragmentation may cause us to fail allocation. */
+	for (i = 0; i < max_entries; i++) {
+		struct map_node *node = _stp_map_kzalloc(node_size, cpu);
+		if (node == NULL)
+			return -1;
+
+		mlist_add(&node->lnode, &m->pool);
+		INIT_MHLIST_NODE(&node->hnode);
+	}
+
+	if (_stp_map_initialize_lock(m) != 0)
+		return -1;
+
+	return 0;
+}
+
+
+/** Create a new map.
+ * Maps must be created at module initialization time.
+ * @param max_entries The maximum number of entries allowed. Currently that
+ * number will be preallocated. If more entries are required, the oldest ones
+ * will be deleted. This makes it effectively a circular buffer.
+ * @param cpu cpu whose NUMA node should hold the allocations, or a
+ * negative value for no preference.
+ * @return A MAP on success or NULL on failure.
+ * @ingroup map_create
+ */
+
+static MAP
+_stp_map_new(unsigned max_entries, int wrap, int node_size, int cpu)
+{
+	MAP m;
+
+	m = _stp_map_kzalloc(sizeof(struct map_root), cpu);
+	if (m == NULL)
+		return NULL;
+
+	/* _stp_map_del cleans up any partially filled node pool. */
+	if (_stp_map_init(m, max_entries, wrap, node_size, cpu)) {
+		_stp_map_del(m);
+		return NULL;
+	}
+	return m;
+}
+
+/** Create a new pmap: one map per possible cpu plus an aggregate map.
+ * Per-cpu maps are allocated NUMA-local to their cpu; the aggregate
+ * map has no node preference.
+ * @return A PMAP on success or NULL on failure.
+ */
+static PMAP
+_stp_pmap_new(unsigned max_entries, int wrap, int node_size)
+{
+	int i;
+	MAP m;
+
+	PMAP pmap = _stp_map_kzalloc(sizeof(struct pmap)
+				     + NR_CPUS * sizeof(MAP), -1);
+	if (pmap == NULL)
+		return NULL;
+
+	/* Allocate the per-cpu maps. */
+	for_each_possible_cpu(i) {
+		m = _stp_map_new(max_entries, wrap, node_size, i);
+		if (m == NULL)
+			goto err;
+		_stp_pmap_set_map(pmap, m, i);
+	}
+
+	/* Allocate the aggregate map. */
+	m = _stp_map_new(max_entries, wrap, node_size, -1);
+	if (m == NULL)
+		goto err;
+	_stp_pmap_set_agg(pmap, m);
+
+	return pmap;
+
+err:
+	/* The pmap was zero-allocated and _stp_map_del tolerates NULL,
+	 * so _stp_pmap_del cleans up however far construction got. */
+	_stp_pmap_del(pmap);
+	return NULL;
+}
+
#endif /* _LINUX_MAP_RUNTIME_H_ */
*/
-static void*
-_stp_map_kzalloc(size_t size, int cpu)
-{
- /* Called from module_init, so user context, may sleep alloc. */
- if (cpu < 0)
- return _stp_kzalloc_gfp(size, STP_ALLOC_SLEEP_FLAGS);
- return _stp_kzalloc_node_gfp(size, cpu_to_node(cpu),
- STP_ALLOC_SLEEP_FLAGS);
-}
-
-
-/** Create a new map.
- * Maps must be created at module initialization time.
- * @param max_entries The maximum number of entries allowed. Currently that number will
- * be preallocated. If more entries are required, the oldest ones will be deleted. This makes
- * it effectively a circular buffer. If max_entries is 0, there will be no maximum and entries
- * will be allocated dynamically.
- * @param type Type of values stored in this map.
- * @return A MAP on success or NULL on failure.
- * @ingroup map_create
- */
-
-static int
-_stp_map_init(MAP m, unsigned max_entries, int wrap, int node_size, int cpu)
-{
- unsigned i;
-
- INIT_MLIST_HEAD(&m->pool);
- INIT_MLIST_HEAD(&m->head);
- for (i = 0; i < HASH_TABLE_SIZE; i++)
- INIT_MHLIST_HEAD(&m->hashes[i]);
-
- m->maxnum = max_entries;
- m->wrap = wrap;
-
- /* It would be nice to allocate the nodes in one big chunk, but
- * sometimes they're big, and there may be a lot of them, so memory
- * fragmentation may cause us to fail allocation. */
- for (i = 0; i < max_entries; i++) {
- struct map_node *node = _stp_map_kzalloc(node_size, cpu);
- if (node == NULL)
- return -1;
-
- mlist_add(&node->lnode, &m->pool);
- INIT_MHLIST_NODE(&node->hnode);
- }
-
- if (_stp_map_initialize_lock(m) != 0)
- return -1;
-
- return 0;
-}
-
-
-static MAP
-_stp_map_new(unsigned max_entries, int wrap, int node_size, int cpu)
-{
- MAP m;
-
- m = _stp_map_kzalloc(sizeof(struct map_root), cpu);
- if (m == NULL)
- return NULL;
-
- if (_stp_map_init(m, max_entries, wrap, node_size, cpu)) {
- _stp_map_del(m);
- return NULL;
- }
- return m;
-}
-
-static PMAP
-_stp_pmap_new(unsigned max_entries, int wrap, int node_size)
-{
- int i;
- MAP m;
-
- /* Called from module_init, so user context, may sleep alloc. */
- PMAP pmap = _stp_pmap_alloc();
- if (pmap == NULL)
- return NULL;
-
- /* Allocate the per-cpu maps. */
- for_each_possible_cpu(i) {
- m = _stp_map_new(max_entries, wrap, node_size, i);
- if (m == NULL)
- goto err1;
- _stp_pmap_set_map(pmap, m, i);
- }
-
- /* Allocate the aggregate map. */
- m = _stp_map_new(max_entries, wrap, node_size, -1);
- if (m == NULL)
- goto err1;
- _stp_pmap_set_agg(pmap, m);
-
- return pmap;
-
-err1:
- for_each_possible_cpu(i) {
- m = _stp_pmap_get_map (pmap, i);
- _stp_map_del(m);
- }
-err:
- _stp_kfree(pmap);
- return NULL;
-}
-
-
/** Get the first element in a map.
* @param map
* @returns a pointer to the first element.
}
-/** Deletes a map.
- * Deletes a map, freeing all memory in all elements. Normally done only when the module exits.
- * @param map
- */
-
-static void _stp_map_del(MAP map)
-{
- struct mlist_head *p, *tmp;
-
- if (map == NULL)
- return;
-
- /* free unused pool */
- mlist_for_each_safe(p, tmp, &map->pool) {
- mlist_del(p);
- _stp_kfree(p);
- }
-
- /* free used list */
- mlist_for_each_safe(p, tmp, &map->head) {
- mlist_del(p);
- _stp_kfree(p);
- }
-
- _stp_map_destroy_lock(map);
-
- _stp_kfree(map);
-}
-
-static void _stp_pmap_del(PMAP pmap)
-{
- int i;
-
- if (pmap == NULL)
- return;
-
- for_each_possible_cpu(i) {
- MAP m = _stp_pmap_get_map (pmap, i);
- _stp_map_del(m);
- }
-
- /* free agg map elements */
- _stp_map_del(_stp_pmap_get_agg(pmap));
-
- _stp_kfree(pmap);
-}
-
/* sort keynum values */
#define SORT_COUNT -5 /* see also translate.cxx:visit_foreach_loop */
#define SORT_SUM -4
int interval);
static PMAP _stp_pmap_new_hstat_log (unsigned max_entries, int wrap, int node_size);
static PMAP _stp_pmap_new_hstat (unsigned max_entries, int wrap, int node_size);
+static void _stp_pmap_del(PMAP pmap);
static MAP _stp_pmap_agg (PMAP pmap, map_update_fn update, map_cmp_fn cmp);
static struct map_node *_stp_new_agg(MAP agg, struct mhlist_head *ahead,
struct map_node *ptr, map_update_fn update);