* runtime/stp_task_work.c: New file.
* runtime/stp_utrace.c (utrace_init): Move STAPCONF_TASK_WORK_ADD_EXPORTED
code to stp_task_work_init().
(utrace_exit): Call stp_task_work_exit().
(utrace_cleanup): Call task_work_cancel() wrapper function.
(utrace_free): Ditto.
(utrace_do_stop): Call task_work_add() wrapper function.
(utrace_control): Ditto.
(finish_report): Ditto.
(utrace_resume): Call stp_task_work_func_done().
* runtime/linux/task_finder2.c (__stp_tf_cancel_task_work): Call
task_work_cancel() wrapper function.
(__stp_tf_quiesce_worker): Call stp_task_work_func_done().
(__stp_utrace_task_finder_target_quiesce): Call task_work_add() wrapper
function.
(__stp_tf_mmap_worker): Call stp_task_work_func_done().
(__stp_utrace_task_finder_target_syscall_exit): Call task_work_add() wrapper
function.
#ifndef STAPCONF_TASK_UID
#include <linux/cred.h>
#endif
-#include <linux/task_work.h>
#include "syscall.h"
#include "task_finder_map.c"
list_for_each_entry(node, &__stp_tf_task_work_list, list) {
// Remove the item from the list, cancel it, then free it.
list_del(&node->list);
- task_work_cancel(node->task, node->work.func);
+ stp_task_work_cancel(node->task, node->work.func);
_stp_kfree(node);
}
spin_unlock_irqrestore(&__stp_tf_task_work_list_lock, flags);
might_sleep();
__stp_tf_free_task_work(work);
- if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING)
+ if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING) {
+ /* Remember that this task_work_func is finished. */
+ stp_task_work_func_done();
return;
+ }
__stp_tf_handler_start();
}
__stp_tf_handler_end();
+
+ /* Remember that this task_work_func is finished. */
+ stp_task_work_func_done();
return;
}
return UTRACE_RESUME;
}
init_task_work(work, &__stp_tf_quiesce_worker, tgt);
- rc = task_work_add(tsk, work, true);
- /* task_work_add() returns -ESRCH if the task has
+
+ rc = stp_task_work_add(tsk, work);
+ /* stp_task_work_add() returns -ESRCH if the task has
* already passed exit_task_work(). Just ignore this
* error. */
if (rc != 0 && rc != -ESRCH) {
- printk(KERN_ERR "%s:%d - task_work_add() returned %d\n",
+ printk(KERN_ERR "%s:%d - stp_task_work_add() returned %d\n",
__FUNCTION__, __LINE__, rc);
}
}
might_sleep();
__stp_tf_free_task_work(work);
- if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING)
+ if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING) {
+ /* Remember that this task_work_func is finished. */
+ stp_task_work_func_done();
return;
+ }
// See if we can find saved syscall info.
entry = __stp_tf_get_map_entry(current);
- if (entry == NULL)
+ if (entry == NULL) {
+ /* Remember that this task_work_func is finished. */
+ stp_task_work_func_done();
return;
+ }
__stp_tf_handler_start();
__stp_tf_remove_map_entry(entry);
__stp_tf_handler_end();
+
+ /* Remember that this task_work_func is finished. */
+ stp_task_work_func_done();
return;
}
return UTRACE_RESUME;
}
init_task_work(work, &__stp_tf_mmap_worker, tgt);
- rc = task_work_add(tsk, work, true);
- /* task_work_add() returns -ESRCH if the task has
+ rc = stp_task_work_add(tsk, work);
+ /* stp_task_work_add() returns -ESRCH if the task has
* already passed exit_task_work(). Just ignore this
* error. */
if (rc != 0 && rc != -ESRCH) {
- printk(KERN_ERR "%s:%d - task_work_add() returned %d\n",
+ printk(KERN_ERR "%s:%d - stp_task_work_add() returned %d\n",
__FUNCTION__, __LINE__, rc);
}
}
--- /dev/null
+#ifndef _STP_TASK_WORK_C
+#define _STP_TASK_WORK_C
+
+#include <linux/task_work.h>
+
+#if !defined(STAPCONF_TASK_WORK_ADD_EXPORTED)
+typedef int (*task_work_add_fn)(struct task_struct *task,
+ struct task_work *twork, bool notify);
+#define task_work_add (* (task_work_add_fn)kallsyms_task_work_add)
+typedef struct task_work *(*task_work_cancel_fn)(struct task_struct *,
+ task_work_func_t);
+#define task_work_cancel (* (task_work_cancel_fn)kallsyms_task_work_cancel)
+#endif
+
+/* To avoid a crash when a task_work callback gets called after the
+ * module is unloaded, keep track of the number of current callbacks. */
+static atomic_t stp_task_work_callbacks = ATOMIC_INIT(0);
+
+/*
+ * stp_task_work_init() should be called before any other
+ * stp_task_work_* functions are called to do setup.
+ */
+int
+stp_task_work_init(void)
+{
+#if !defined(STAPCONF_TASK_WORK_ADD_EXPORTED)
+ /* The task_work_add()/task_work_cancel() functions aren't
+ * exported. Look up those function addresses. */
+ kallsyms_task_work_add = (void *)kallsyms_lookup_name("task_work_add");
+ if (kallsyms_task_work_add == NULL) {
+ _stp_error("Can't resolve task_work_add!");
+ return -ENOENT;
+ }
+ kallsyms_task_work_cancel = (void *)kallsyms_lookup_name("task_work_cancel");
+ if (kallsyms_task_work_cancel == NULL) {
+ _stp_error("Can't resolve task_work_cancel!");
+ return -ENOENT;
+ }
+#endif
+ return 0;
+}
+
+/*
+ * stp_task_work_exit() should be called when no more
+ * stp_task_work_* functions will be called (before module exit).
+ *
+ * This function makes sure that all the callbacks are finished before
+ * letting the module unload. If the module unloads before a callback
+ * is called, the kernel will try to make a function call to an
+ * invalid address.
+ */
+void
+stp_task_work_exit(void)
+{
+ while (atomic_read(&stp_task_work_callbacks))
+ schedule_timeout_uninterruptible(1);
+ return;
+}
+
+/*
+ * Our task_work_add() wrapper that remembers that we've got a pending
+ * callback.
+ */
+int
+stp_task_work_add(struct task_struct *task, struct task_work *twork)
+{
+ int rc;
+
+ rc = task_work_add(task, twork, true);
+ if (rc == 0)
+ atomic_inc(&stp_task_work_callbacks);
+ return rc;
+}
+
+/*
+ * Our task_work_cancel() wrapper that remembers that a callback has
+ * been cancelled.
+ */
+struct task_work *
+stp_task_work_cancel(struct task_struct *task, task_work_func_t func)
+{
+ struct task_work *twork;
+
+ twork = task_work_cancel(task, func);
+ if (twork != NULL)
+ atomic_dec(&stp_task_work_callbacks);
+ return twork;
+}
+
+/*
+ * stp_task_work_func_done() should be called at the very end of a
+ * task_work callback function so that we can keep up with callback
+ * accounting.
+ */
+void
+stp_task_work_func_done(void)
+{
+ atomic_dec(&stp_task_work_callbacks);
+}
+
+
+#endif /* _STP_TASK_WORK_C */
#include <linux/spinlock.h>
#include <trace/events/sched.h>
#include <trace/events/syscalls.h>
-#include <linux/task_work.h>
+#include "stp_task_work.c"
/*
* Per-thread structure private to utrace implementation.
#define __UTRACE_REGISTERED 1
static atomic_t utrace_state = ATOMIC_INIT(__UTRACE_UNREGISTERED);
-#if !defined(STAPCONF_TASK_WORK_ADD_EXPORTED)
-typedef int (*task_work_add_fn)(struct task_struct *task,
- struct task_work *twork, bool notify);
-#define task_work_add (* (task_work_add_fn)kallsyms_task_work_add)
-typedef struct task_work *(*task_work_cancel_fn)(struct task_struct *,
- task_work_func_t);
-#define task_work_cancel (* (task_work_cancel_fn)kallsyms_task_work_cancel)
-#endif
-
int utrace_init(void)
{
int i;
int rc = -1;
-#if !defined(STAPCONF_TASK_WORK_ADD_EXPORTED)
- /* The task_work_add()/task_work_cancel() functions aren't
- * exported. Look up those function addresses. */
- kallsyms_task_work_add = (void *)kallsyms_lookup_name ("task_work_add");
- if (kallsyms_task_work_add == NULL) {
- printk(KERN_ERR "%s can't resolve task_work_add!",
- THIS_MODULE->name);
- rc = -ENOENT;
+ if (unlikely(stp_task_work_init() != 0))
goto error;
- }
- kallsyms_task_work_cancel = (void *)kallsyms_lookup_name ("task_work_cancel");
- if (kallsyms_task_work_cancel == NULL) {
- printk(KERN_ERR "%s can't resolve task_work_cancel!",
- THIS_MODULE->name);
- rc = -ENOENT;
- goto error;
- }
-#endif
/* initialize the list heads */
for (i = 0; i < TASK_UTRACE_TABLE_SIZE; i++) {
kmem_cache_destroy(utrace_cachep);
if (utrace_engine_cachep)
kmem_cache_destroy(utrace_engine_cachep);
+
+ stp_task_work_exit();
return 0;
}
}
if (utrace->task_work_added) {
- if (task_work_cancel(utrace->task, &utrace_resume) == NULL)
+ if (stp_task_work_cancel(utrace->task, &utrace_resume) == NULL)
printk(KERN_ERR "%s:%d - task_work_cancel() failed? task %p, %d, %s\n",
__FUNCTION__, __LINE__, utrace->task,
utrace->task->tgid,
#endif
if (utrace->task_work_added) {
- if (task_work_cancel(utrace->task, &utrace_resume) == NULL)
+ if (stp_task_work_cancel(utrace->task, &utrace_resume) == NULL)
printk(KERN_ERR "%s:%d - task_work_cancel() failed? task %p, %d, %s\n",
__FUNCTION__, __LINE__, utrace->task,
utrace->task->tgid,
} else if (utrace->resume > UTRACE_REPORT) {
utrace->resume = UTRACE_REPORT;
if (! utrace->task_work_added) {
- int rc = task_work_add(target, &utrace->work, true);
+ int rc = stp_task_work_add(target, &utrace->work);
if (rc == 0) {
utrace->task_work_added = 1;
}
- /* task_work_add() returns -ESRCH if the task
+ /* stp_task_work_add() returns -ESRCH if the task
* has already passed exit_task_work(). Just
* ignore this error. */
else if (rc != -ESRCH) {
*/
utrace->resume = action;
if (! utrace->task_work_added) {
- int rc = task_work_add(task, &utrace->work, true);
+ int rc = stp_task_work_add(task, &utrace->work);
if (rc == 0) {
utrace->task_work_added = 1;
}
- /* task_work_add() returns -ESRCH if the task
+ /* stp_task_work_add() returns -ESRCH if the task
* has already passed exit_task_work(). Just
* ignore this error. */
else if (rc != -ESRCH) {
printk(KERN_ERR
- "%s:%d - task_work_add() returned %d\n",
+ "%s:%d - stp_task_work_add() returned %d\n",
__FUNCTION__, __LINE__, rc);
}
}
if (action < utrace->resume) {
utrace->resume = action;
if (! utrace->task_work_added) {
- ret = task_work_add(target, &utrace->work,
- true);
+ ret = stp_task_work_add(target, &utrace->work);
if (ret == 0) {
utrace->task_work_added = 1;
}
- /* task_work_add() returns -ESRCH if
+ /* stp_task_work_add() returns -ESRCH if
* the task has already passed
* exit_task_work(). Just ignore this
* error. */
else if (ret != -ESRCH) {
printk(KERN_ERR
- "%s:%d - task_work_add() returned %d\n",
+ "%s:%d - stp_task_work_add() returned %d\n",
__FUNCTION__, __LINE__, ret);
}
}
spin_lock(&utrace->lock);
utrace->resume = resume;
if (! utrace->task_work_added) {
- int rc = task_work_add(task, &utrace->work, true);
+ int rc = stp_task_work_add(task, &utrace->work);
if (rc == 0) {
utrace->task_work_added = 1;
}
- /* task_work_add() returns -ESRCH if the task
+ /* stp_task_work_add() returns -ESRCH if the task
* has already passed exit_task_work(). Just
* ignore this error. */
else if (rc != -ESRCH) {
* call. (The arch might use TIF_NOTIFY_RESUME for other
* purposes as well as calling us.)
*/
+
+ /* Remember that this task_work_func is finished. */
+ stp_task_work_func_done();
return;
case UTRACE_REPORT:
if (unlikely(!(utrace->utrace_flags & UTRACE_EVENT(QUIESCE))))
* effect now (i.e. step or interrupt).
*/
finish_resume_report(task, utrace, &report);
+
+ /* Remember that this task_work_func is finished. */
+ stp_task_work_func_done();
}
#endif /* _STP_UTRACE_C */