#include <linux/spinlock.h>
#include <trace/events/sched.h>
#include <trace/events/syscalls.h>
+#include <linux/task_work.h>
/*
* Per-thread structure private to utrace implementation.
unsigned int death:1; /* in utrace_report_death() now */
unsigned int reap:1; /* release_task() has run */
unsigned int pending_attach:1; /* need splice_attaching() */
+ unsigned int task_work_added:1; /* called task_work_add() */
unsigned long utrace_flags;
struct hlist_node hlist; /* task_utrace_table linkage */
struct task_struct *task;
+
+ struct task_work work;
};
#define TASK_UTRACE_HASH_BITS 5
#define __UTRACE_REGISTERED 1
static atomic_t utrace_state = ATOMIC_INIT(__UTRACE_UNREGISTERED);
+#if !defined(STAPCONF_TASK_WORK_ADD_EXPORTED)
+/* On kernels where task_work_add()/task_work_cancel() exist but are
+ * not exported to modules, call them through typed function pointers.
+ * The kallsyms_task_work_add/kallsyms_task_work_cancel addresses are
+ * resolved with kallsyms_lookup_name() in utrace_init(). */
+typedef int (*task_work_add_fn)(struct task_struct *task,
+ struct task_work *twork, bool notify);
+#define task_work_add (* (task_work_add_fn)kallsyms_task_work_add)
+typedef struct task_work *(*task_work_cancel_fn)(struct task_struct *,
+ task_work_func_t);
+#define task_work_cancel (* (task_work_cancel_fn)kallsyms_task_work_cancel)
+#endif
+
int utrace_init(void)
{
int i;
char *report_exec_name;
#endif
+#if !defined(STAPCONF_TASK_WORK_ADD_EXPORTED)
+ /* The task_work_add()/task_work_cancel() functions aren't
+ * exported. Look up those function addresses. */
+ kallsyms_task_work_add = (void *)kallsyms_lookup_name ("task_work_add");
+ if (kallsyms_task_work_add == NULL) {
+ printk(KERN_ERR "%s can't resolve task_work_add!",
+ THIS_MODULE->name);
+ rc = -ENOENT;
+ goto error;
+ }
+ kallsyms_task_work_cancel = (void *)kallsyms_lookup_name ("task_work_cancel");
+ if (kallsyms_task_work_cancel == NULL) {
+ printk(KERN_ERR "%s can't resolve task_work_cancel!",
+ THIS_MODULE->name);
+ rc = -ENOENT;
+ goto error;
+ }
+#endif
+
/* initialize the list heads */
for (i = 0; i < TASK_UTRACE_TABLE_SIZE; i++) {
INIT_HLIST_HEAD(&task_utrace_table[i]);
return 0;
}
+void utrace_resume(struct task_work *work);
+
/*
* Clean up everything associated with @task.utrace.
*
list_del(&engine->entry);
kmem_cache_free(utrace_engine_cachep, engine);
}
+
+ if (utrace->task_work_added) {
+ if (task_work_cancel(utrace->task, &utrace_resume) == NULL)
+ printk(KERN_ERR "%s:%d - task_work_cancel() failed? task %p, %d, %s\n",
+ __FUNCTION__, __LINE__, utrace->task,
+ utrace->task->tgid,
+ (utrace->task->comm ? utrace->task->comm
+ : "UNKNOWN"));
+ utrace->task_work_added = 0;
+ }
spin_unlock(&utrace->lock);
/* Free the struct utrace itself. */
kmem_cache_free(utrace_cachep, utrace);
}
+ init_task_work(&utrace->work, &utrace_resume, NULL);
return true;
}
list_empty(&utrace->attached),
list_empty(&utrace->attaching));
#endif
+
+ if (utrace->task_work_added) {
+ if (task_work_cancel(utrace->task, &utrace_resume) == NULL)
+ printk(KERN_ERR "%s:%d - task_work_cancel() failed? task %p, %d, %s\n",
+ __FUNCTION__, __LINE__, utrace->task,
+ utrace->task->tgid,
+ (utrace->task->comm ? utrace->task->comm
+ : "UNKNOWN"));
+ utrace->task_work_added = 0;
+ }
+
kmem_cache_free(utrace_cachep, utrace);
}
if (likely(task_is_stopped(target)))
__set_task_state(target, TASK_TRACED);
spin_unlock_irq(&target->sighand->siglock);
-#if 0
- /* FIXME: needed? If so, what to do here? */
} else if (utrace->resume > UTRACE_REPORT) {
utrace->resume = UTRACE_REPORT;
- set_notify_resume(target);
-#endif
+ if (! utrace->task_work_added) {
+ int rc = task_work_add(target, &utrace->work, true);
+ if (rc != 0)
+ printk(KERN_ERR
+ "%s:%d - task_work_add() returned %d\n",
+ __FUNCTION__, __LINE__, rc);
+ else {
+ utrace->task_work_added = 1;
+ }
+ }
}
return task_is_traced(target);
* Ensure a reporting pass when we're resumed.
*/
utrace->resume = action;
- /* FIXME: needed? */
- set_thread_flag(TIF_NOTIFY_RESUME);
+ if (! utrace->task_work_added) {
+ int rc = task_work_add(task, &utrace->work, true);
+ if (rc != 0)
+ printk(KERN_ERR
+ "%s:%d - task_work_add() returned %d\n",
+ __FUNCTION__, __LINE__, rc);
+ else {
+ utrace->task_work_added = 1;
+ }
+ }
}
/*
* In that case, utrace_get_signal() will be reporting soon.
*/
clear_engine_wants_stop(engine);
-#if 0
- /* FIXME: needed? If so, what to do here? */
if (action < utrace->resume) {
utrace->resume = action;
- set_notify_resume(target);
+ if (! utrace->task_work_added) {
+ ret = task_work_add(target, &utrace->work, true);
+ if (ret != 0)
+ printk(KERN_ERR
+ "%s:%d - task_work_add() returned %d\n",
+ __FUNCTION__, __LINE__, ret);
+ else {
+ utrace->task_work_added = 1;
+ }
+ }
}
-#endif
break;
default:
if (resume < utrace->resume) {
spin_lock(&utrace->lock);
utrace->resume = resume;
- /* FIXME: Hmm, unsure about calling set_tsk_thread_flag()... */
- set_tsk_thread_flag(task, TIF_NOTIFY_RESUME);
+ if (! utrace->task_work_added) {
+ int rc = task_work_add(task, &utrace->work, true);
+ if (rc != 0)
+ printk(KERN_ERR
+ "%s:%d - task_work_add() returned %d\n",
+ __FUNCTION__, __LINE__, rc);
+ else {
+ utrace->task_work_added = 1;
+ }
+ }
spin_unlock(&utrace->lock);
}
unsigned long clone_flags = 0;
INIT_REPORT(report);
-
/* FIXME: Figure out what the clone_flags were. For
* task_finder's purposes, all we need is CLONE_THREAD. */
if (task->mm == child->mm)
* We are close to user mode, and this is the place to report or stop.
* When we return, we're going to user mode or into the signals code.
*/
-void utrace_resume(struct task_struct *task, struct pt_regs *regs)
+void utrace_resume(struct task_work *work)
{
- struct utrace *utrace = task_utrace_struct(task);
+ /*
+ * We could also do 'task_utrace_struct()' here to find the
+ * task's 'struct utrace', but 'container_of()' should be
+ * instantaneous (where 'task_utrace_struct()' has to do a
+ * hash lookup).
+ */
+ struct utrace *utrace = container_of(work, struct utrace, work);
+ struct task_struct *task = current;
INIT_REPORT(report);
struct utrace_engine *engine;
+ might_sleep();
+ utrace->task_work_added = 0;
+
/*
* Some machines get here with interrupts disabled. The same arch
* code path leads to calling into get_signal_to_deliver(), which
#ifndef STAPCONF_TASK_UID
#include <linux/cred.h>
#endif
+#include <linux/task_work.h>
#include "syscall.h"
#include "task_finder_map.c"
__stp_call_mmap_callbacks_for_task(struct stap_task_finder_target *tgt,
struct task_struct *tsk);
+
+static inline void
+__stp_call_callbacks(struct stap_task_finder_target *tgt,
+ struct task_struct *tsk, int register_p, int process_p);
+
+static void
+__stp_task_worker(struct task_work *work)
+{
+ /* Deferred task-finder callback, queued with task_work_add() so it
+ * runs in the context of 'current' — presumably on the task's
+ * return to user mode, where sleeping is allowed (see might_sleep()
+ * below); confirm against task_work semantics for this kernel. */
+ struct stap_task_finder_target *tgt = work->data;
+
+ might_sleep();
+ /* 'work' was kmalloc'ed by the code that queued us; free it now
+ * that the target pointer has been saved. */
+ _stp_kfree(work);
+ if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING)
+ return;
+
+ __stp_tf_handler_start();
+
+ /* Call the callbacks. Assume that if the thread is a
+ * thread group leader, it is a process. */
+ __stp_call_callbacks(tgt, current, 1, (current->pid == current->tgid));
+
+ /* If this is just a thread other than the thread group
+ * leader, don't bother informing map callback clients about its
+ * memory map, since they would simply duplicate each other. */
+ if (tgt->mmap_events == 1 && current->tgid == current->pid) {
+ __stp_call_mmap_callbacks_for_task(tgt, current);
+ }
+
+ __stp_tf_handler_end();
+ return;
+}
+
static int
stap_register_task_finder_target(struct stap_task_finder_target *new_tgt)
{
_stp_error("utrace_set_events returned error %d on pid %d",
rc, (int)tsk->pid);
+ if (in_atomic() || irqs_disabled()) {
+ struct task_work *work;
- /* Call the callbacks. Assume that if the thread is a
- * thread group leader, it is a process. */
- __stp_call_callbacks(tgt, tsk, 1, (tsk->pid == tsk->tgid));
+ /* If we can't sleep, arrange for the task to truly
+ * stop so we can sleep. */
+ work = _stp_kmalloc(sizeof(*work));
+ if (work == NULL) {
+ _stp_error("Unable to allocate space for task_work");
+ return UTRACE_RESUME;
+ }
+ init_task_work(work, &__stp_task_worker, tgt);
+ /* FIXME: Hmm, let's say we exit between adding the
+ * task work and it firing. How do we cancel? */
+ rc = task_work_add(tsk, work, true);
+ if (rc != 0) {
+ printk(KERN_ERR
+ "%s:%d - task_work_add() returned %d\n",
+ __FUNCTION__, __LINE__, rc);
+ }
+ }
+ else {
+ /* Call the callbacks. Assume that if the thread is a
+ * thread group leader, it is a process. */
+ __stp_call_callbacks(tgt, tsk, 1, (tsk->pid == tsk->tgid));
- /* If this is just a thread other than the thread group leader,
- don't bother inform map callback clients about its memory map,
- since they will simply duplicate each other. */
- if (tgt->mmap_events == 1 && tsk->tgid == tsk->pid) {
- __stp_call_mmap_callbacks_for_task(tgt, tsk);
+ /* If this is just a thread other than the thread
+ group leader, don't bother informing map callback
+ clients about its memory map, since they would
+ simply duplicate each other. */
+ if (tgt->mmap_events == 1 && tsk->tgid == tsk->pid) {
+ __stp_call_mmap_callbacks_for_task(tgt, tsk);
+ }
}
__stp_tf_handler_end();