struct addr_map_entry entries[0];
};
+#ifdef CONFIG_PREEMPT_RT
+/* On PREEMPT_RT, spinlock_t becomes a sleeping lock; the raw variant
+   keeps real spinning semantics (NOTE(review): presumably needed because
+   this lock is taken in atomic context -- confirm against callers). */
+static DEFINE_RAW_SPINLOCK(addr_map_lock);
+#else
static DEFINE_SPINLOCK(addr_map_lock);
-
+#endif
static struct addr_map* blackmap;
/* Find address of entry where we can insert a new one. */
static int _stp_allocated_memory = 0;
#ifdef DEBUG_MEM
+#ifdef CONFIG_PREEMPT_RT
+/* Debug-memory accounting lock: raw on RT so spinlock semantics are
+   preserved under CONFIG_PREEMPT_RT (spinlock_t sleeps there). */
+static DEFINE_RAW_SPINLOCK(_stp_mem_lock);
+#else
static DEFINE_SPINLOCK(_stp_mem_lock);
+#endif
#define MEM_MAGIC 0xc11cf77f
#define MEM_FENCE_SIZE 32
 * @note Preemption must be disabled to use this.
 */
+#ifdef CONFIG_PREEMPT_RT
+/* Print-buffer flush lock: must stay a true spinlock on RT since the
+   surrounding code runs with preemption disabled (see note above). */
+static DEFINE_RAW_SPINLOCK(_stp_print_lock);
+#else
static DEFINE_SPINLOCK(_stp_print_lock);
+#endif
void EXPORT_FN(stp_print_flush)(_stp_pbuf *pb)
{
atomic_t *skipped;
#endif
+/* Choose the rwlock pointer type matching the kernel config; the raw
+   spelling is the RT-patch type.  This hunk also fixes a missing ';'
+   on the raw declaration. */
#ifdef CONFIG_PREEMPT_RT
- raw_rwlock_t *lock
+ raw_rwlock_t *lock;
#else
rwlock_t *lock;
#endif
// contents in interrupt context (which should only ever call
// stap_find_map_map_info for getting stored info). So we might
// want to look into that if this seems a bottleneck.
+#ifdef CONFIG_PREEMPT_RT
+// Raw rwlock on RT: rwlock_t becomes a sleeping lock under
+// CONFIG_PREEMPT_RT, but this is read in interrupt context (see above),
+// so the raw (truly spinning) variant is required.
+static DEFINE_RAW_RWLOCK(__stp_tf_map_lock);
+#else
static DEFINE_RWLOCK(__stp_tf_map_lock);
+#endif
#define __STP_TF_HASH_BITS 4
#define __STP_TF_TABLE_SIZE (1 << __STP_TF_HASH_BITS)
// contents in interrupt context (which should only ever call
// stap_find_vma_map_info for getting stored vma info). So we might
// want to look into that if this seems a bottleneck.
+#ifdef CONFIG_PREEMPT_RT
+// Raw rwlock on RT: read in interrupt context (see above), so the
+// sleeping rwlock_t that RT substitutes would not be safe here.
+static DEFINE_RAW_RWLOCK(__stp_tf_vma_lock);
+#else
static DEFINE_RWLOCK(__stp_tf_vma_lock);
+#endif
#define __STP_TF_HASH_BITS 4
#define __STP_TF_TABLE_SIZE (1 << __STP_TF_HASH_BITS)
static _stp_mempool_t *_stp_pool_q;
static struct list_head _stp_ctl_ready_q;
+#ifdef CONFIG_PREEMPT_RT
+/* Guards _stp_ctl_ready_q; raw on RT so it keeps spinlock semantics
+   (NOTE(review): presumably taken from atomic context -- confirm). */
+static DEFINE_RAW_SPINLOCK(_stp_ctl_ready_lock);
+#else
static DEFINE_SPINLOCK(_stp_ctl_ready_lock);
+#endif
static void _stp_cleanup_and_exit(int send_exit);
/* Table of uprobe_tasks, hashed by task_struct pointer. */
static struct hlist_head utask_table[UPROBE_TABLE_SIZE];
+#ifdef CONFIG_PREEMPT_RT
+/* Protects utask_table; raw spinlock on RT where spinlock_t sleeps. */
+static DEFINE_RAW_SPINLOCK(utask_table_lock);
+#else
static DEFINE_SPINLOCK(utask_table_lock);
+#endif
#define lock_uproc_table() mutex_lock(&uproc_mutex)
#define unlock_uproc_table() mutex_unlock(&uproc_mutex)
/* Table of uprobe_tasks, hashed by task_struct pointer. */
static struct hlist_head utask_table[UPROBE_TABLE_SIZE];
+#ifdef CONFIG_PREEMPT_RT
+/* Protects utask_table; raw spinlock on RT where spinlock_t sleeps.
+   (Same change as the sibling copy of this table elsewhere in the series.) */
+static DEFINE_RAW_SPINLOCK(utask_table_lock);
+#else
static DEFINE_SPINLOCK(utask_table_lock);
+#endif
#define lock_uproc_table() mutex_lock(&uproc_mutex)
#define unlock_uproc_table() mutex_unlock(&uproc_mutex)