sd.nprobes = nprobes;
sd.probe_max = probe_max;
sd.modname = NULL;
+ might_sleep();
mutex_lock(&module_mutex);
kallsyms_on_each_symbol(stapkp_symbol_callback, &sd);
mutex_unlock(&module_mutex);
sd.nprobes = nprobes;
sd.probe_max = probe_max;
sd.modname = modname;
+ might_sleep();
mutex_lock(&module_mutex);
kallsyms_on_each_symbol(stapkp_symbol_callback, &sd);
mutex_unlock(&module_mutex);
preempt_disable ();
c = _stp_runtime_get_context();
if (c != NULL) {
- if (atomic_inc_return(&c->busy) == 1)
+ if (atomic_inc_return(&c->busy) == 1) {
+ // NB: Notice we're not re-enabling preemption
+ // here. We expect the calling code to call
+ // _stp_runtime_entryfn_get_context() and
+ // _stp_runtime_entryfn_put_context() as a
+ // pair.
return c;
+ }
atomic_dec(&c->busy);
}
preempt_enable_no_resched();
struct tracepoint_entry *e;
int ret = 0;
+ might_sleep();
mutex_lock(&stp_tracepoint_mutex);
e = get_tracepoint(name);
if (!e) {
struct tracepoint_entry *e;
int ret = 0;
+ might_sleep();
mutex_lock(&stp_tracepoint_mutex);
e = get_tracepoint(name);
if (!e) {
{
int i;
+ might_sleep();
mutex_lock(&stp_tracepoint_mutex);
for (i = 0; i < tp_mod->mod->num_tracepoints; i++) {
struct tracepoint *tp;
{
int i;
+ might_sleep();
mutex_lock(&stp_tracepoint_mutex);
for (i = 0; i < tp_mod->mod->num_tracepoints; i++) {
struct tracepoint *tp;
struct stp_tp_probe *p;
int *ret = priv;
+ might_sleep();
mutex_lock(&stp_tracepoint_mutex);
e = get_tracepoint(tp->name);
if (!e) {
struct tracepoint_entry *e;
int *ret = priv;
+ might_sleep();
mutex_lock(&stp_tracepoint_mutex);
e = get_tracepoint(tp->name);
if (!e || e->refcount != 1 || !list_empty(&e->probes)) {
stp_tracepoint_module_exit();
for_each_kernel_tracepoint(stp_kernel_tracepoint_remove, &ret);
+ might_sleep();
mutex_lock(&stp_tracepoint_mutex);
for (i = 0; i < TRACEPOINT_TABLE_SIZE; i++) {
struct hlist_head *head = &tracepoint_table[i];
stap_uprobes[] array to allocate a free spot, but then we can
unlock and do the register_*probe subsequently. */
+ might_sleep();
mutex_lock (& stap_uprobes_lock);
for (i=0; i<MAXUPROBES; i++) { /* XXX: slow linear search */
sup = & stap_uprobes[i];
_stp_warn ("u*probe failed %s[%d] '%s' addr %p rc %d\n", tsk->comm, tsk->tgid, sups->probe->pp, (void*)(relocation + sups->address), rc);
/* NB: we need to release this slot,
so we need to borrow the mutex temporarily. */
+ might_sleep();
mutex_lock (& stap_uprobes_lock);
sup->spec_index = -1;
sup->sdt_sem_address = 0;
struct stap_uprobe_spec *sups;
if (sup->spec_index < 0) continue; /* skip free uprobes slot */
sups = (struct stap_uprobe_spec*) & stap_uprobe_specs[sup->spec_index];
+ might_sleep();
mutex_lock (& stap_uprobes_lock);
/* PR6829, PR9940:
static inline void
stapiu_target_lock(struct stapiu_target *target)
{
+ /* NOTE(review): mutex_lock() can block, so this helper must only be
+    called from sleepable (process) context.  might_sleep() documents
+    that requirement and, with CONFIG_DEBUG_ATOMIC_SLEEP, catches
+    callers that violate it at runtime. */
+ might_sleep();
mutex_lock(&target->inode_lock);
}
{
int cpu, ret = 0;
+ might_sleep();
_stp_kill_time();
stp_time = _stp_alloc_percpu(sizeof(stp_time_t));
// PR17232: preclude reentrancy during handling of messages.
// This also permits use of static variables in the switch/case.
+ might_sleep();
mutex_lock (& cmd_mutex);
// NB: past this point, no 'return;' - use 'goto out;'
inode_lock(inode);
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ might_sleep();
mutex_lock(&inode->i_mutex);
#else
down(&inode->i_sem);
o->newline() << "const char* version = UTS_VERSION;";
o->newline() << "#endif";
+ // The systemtap_module_init() function must be run in
+ // non-atomic context, since several functions might need to
+ // sleep.
+ o->newline() << "might_sleep();";
+
// NB: This UTS_RELEASE compile-time macro directly checks only that
// the compile-time kbuild tree matches the compile-time debuginfo/etc.
// It does not check the run time kernel value. However, this is