/* runtime/linux/runtime_context.h — from the systemtap project
 * (sourceware.org systemtap.git). */
1 /* -*- linux-c -*-
2 * Context Runtime Functions
3 * Copyright (C) 2014 Red Hat Inc.
4 *
5 * This file is part of systemtap, and is free software. You can
6 * redistribute it and/or modify it under the terms of the GNU General
7 * Public License (GPL); either version 2, or (at your option) any
8 * later version.
9 */
10
11 #ifndef _LINUX_RUNTIME_CONTEXT_H_
12 #define _LINUX_RUNTIME_CONTEXT_H_
13
/* One probe-execution context per possible CPU, allocated lazily by
 * _stp_runtime_contexts_alloc(); slots are NULL until then. */
static struct context *contexts[NR_CPUS] = { NULL };
15
16 static int _stp_runtime_contexts_alloc(void)
17 {
18 int cpu;
19
20 for_each_possible_cpu(cpu) {
21 /* Module init, so in user context, safe to use
22 * "sleeping" allocation. */
23 contexts[cpu] = _stp_kzalloc_gfp(sizeof(struct context),
24 STP_ALLOC_SLEEP_FLAGS);
25 if (contexts[cpu] == NULL) {
26 _stp_error ("context (size %lu) allocation failed",
27 (unsigned long) sizeof (struct context));
28 return -ENOMEM;
29 }
30 }
31 return 0;
32 }
33
34 static void _stp_runtime_contexts_free(void)
35 {
36 int cpu;
37
38 for_each_possible_cpu(cpu) {
39 if (contexts[cpu] != NULL) {
40 _stp_kfree(contexts[cpu]);
41 contexts[cpu] = NULL;
42 }
43 }
44 }
45
/* Acquire this CPU's context at probe entry.
 *
 * Disables preemption first (so the task cannot migrate off this CPU
 * while holding the context), then claims the per-cpu context by
 * incrementing its 'busy' counter.  The context is handed out only
 * when this caller is the sole owner (counter went 0 -> 1); any other
 * prior value means the context is already in use (e.g. a nested /
 * reentrant probe on this CPU), so the increment is backed out.
 *
 * Returns the claimed context, or NULL if it is missing or busy — in
 * which case preemption has already been re-enabled here.  A non-NULL
 * return MUST be paired with _stp_runtime_entryfn_put_context(),
 * which performs the matching atomic_dec and preempt_enable. */
static struct context * _stp_runtime_entryfn_get_context(void)
{
	struct context* __restrict__ c = NULL;
	preempt_disable ();
	c = contexts[smp_processor_id()];
	if (c != NULL) {
		if (atomic_inc_return(&c->busy) == 1)
			return c;
		/* Lost the race / nested use: undo our claim. */
		atomic_dec(&c->busy);
	}
	preempt_enable_no_resched();
	return NULL;
}
59
/* Release a context obtained from _stp_runtime_entryfn_get_context(),
 * undoing the acquisition in reverse order: drop the 'busy' claim,
 * then re-enable preemption.  Only acts when c is non-NULL and still
 * equals this CPU's context (it must, since preemption was disabled
 * for the whole get/put window).  Passing NULL — a failed get — is a
 * safe no-op. */
static inline void _stp_runtime_entryfn_put_context(struct context *c)
{
	if (c && c == contexts[smp_processor_id()]) {
		atomic_dec(&c->busy);
		preempt_enable_no_resched();
	}
	/* else, warn about bad state? */
	return;
}
69
70 static inline struct context * _stp_runtime_get_context(void)
71 {
72 return contexts[smp_processor_id()];
73 }
74
/* Wait at module shutdown until every per-cpu context is idle
 * (busy == 0), i.e. no probe handler is still executing inside one.
 *
 * Polls all possible CPUs in a loop.  If the wait exceeds ~1 second,
 * each still-busy context is reported once (by ascending CPU index,
 * tracked via hold_index) together with the probe point occupying it.
 * Behavior when truly stuck depends on STAP_OVERRIDE_STUCK_CONTEXT:
 * defined, the loop gives up after ~10 seconds and lets cleanup
 * proceed (risking a fault, see comment below); undefined, the loop
 * keeps waiting forever but sleeps 250ms per pass to avoid burning
 * CPU. */
static void _stp_runtime_context_wait(void)
{
	int holdon;			/* any context still busy this pass? */
	unsigned long hold_start;	/* jiffies when the wait began */
	int hold_index;			/* highest CPU already reported stuck */

	hold_start = jiffies;
	hold_index = -1;
	do {
		int i;

		holdon = 0;
		for_each_possible_cpu(i) {
			if (contexts[i] != NULL
			    && atomic_read (& contexts[i]->busy)) {
				holdon = 1;

				/* Just in case things are really
				 * stuck, let's print some diagnostics. */
				if (time_after(jiffies, hold_start + HZ) // > 1 second
				    && (i > hold_index)) { // not already printed
					hold_index = i;
					printk(KERN_ERR "%s context[%d] stuck: %s\n", THIS_MODULE->name, i, contexts[i]->probe_point);
				}
			}
		}

		/*
		 * Just in case things are really really stuck, a
		 * handler probably suffered a fault, and the kernel
		 * probably killed a task/thread already. We can't be
		 * quite sure in what state everything is in, however
		 * auxiliary stuff like kprobes / uprobes / locks have
		 * already been unregistered. So it's *probably* safe
		 * to pretend/assume/hope everything is OK, and let
		 * the cleanup finish.
		 *
		 * In the worst case, there may occur a fault, as a
		 * genuinely running probe handler tries to access
		 * script globals (about to be freed), or something
		 * accesses module memory (about to be unloaded).
		 * This is sometimes stinky, so the alternative
		 * (default) is to change from a livelock to a
		 * livelock that sleeps awhile.
		 */
#ifdef STAP_OVERRIDE_STUCK_CONTEXT
		if (time_after(jiffies, hold_start + HZ*10)) { // > 10 seconds
			printk(KERN_ERR "%s overriding stuck context to allow module shutdown.", THIS_MODULE->name);
			holdon = 0; // allow loop to exit
		}
#else
		/* at least stop sucking down the staprun cpu */
		msleep(250);
#endif

		/* NB: we run at least one of these during the
		 * shutdown sequence: */
		yield(); /* aka schedule() and then some */
	} while (holdon);
}
135
136 #endif /* _LINUX_RUNTIME_CONTEXT_H_ */
/* end of runtime_context.h */