]> sourceware.org Git - systemtap.git/blob - bpf-base.cxx
update NEWS for PR24953
[systemtap.git] / bpf-base.cxx
1 // bpf translation pass
2 // Copyright (C) 2016-2019 Red Hat Inc.
3 //
4 // This file is part of systemtap, and is free software. You can
5 // redistribute it and/or modify it under the terms of the GNU General
6 // Public License (GPL); either version 2, or (at your option) any
7 // later version.
8
9 #include "config.h"
10 #include "bpf-internal.h"
11 #include "elaborate.h"
12 #include "session.h"
13 #include "util.h"
14
15 using namespace std;
16
17 namespace bpf {
18
19 std::ostream &
20 value::print(std::ostream &o) const
21 {
22 switch (type)
23 {
24 case UNINIT:
25 return o << "#";
26 case IMM:
27 return o << "$" << imm_val;
28 case STR:
29 return o << "$\"" << escaped_literal_string (str_val) << "\"";
30 case HARDREG:
31 return o << "r" << reg_val;
32 case TMPREG:
33 return o << "t" << reg_val;
34 default:
35 return o << "<BUG:unknown operand>";
36 }
37 }
38
// Construct an empty, not-yet-assembled instruction: opcode -1 marks
// it unfilled; operand pointers and list links start out null.
insn::insn()
  : code(-1), id(0), off(0),
    dest(NULL), src0(NULL), src1(NULL),
    prev(NULL), next(NULL)
{ }
44
45 bool
46 is_jmp(opcode code)
47 {
48 if (BPF_CLASS (code) != BPF_JMP)
49 return false;
50 switch (BPF_OP (code))
51 {
52 case BPF_JA:
53 case BPF_JEQ:
54 case BPF_JGT:
55 case BPF_JGE:
56 case BPF_JSET:
57 case BPF_JNE:
58 case BPF_JSGT:
59 case BPF_JSGE:
60 return true;
61 default:
62 return false;
63 }
64 }
65
66 bool
67 is_move(opcode c)
68 {
69 switch (c)
70 {
71 case BPF_ALU64 | BPF_MOV | BPF_X:
72 case BPF_ALU64 | BPF_MOV | BPF_K:
73 case BPF_ALU | BPF_MOV | BPF_K:
74 case BPF_LD | BPF_IMM | BPF_DW:
75 case BPF_LD_MAP:
76 return true;
77 default:
78 return false;
79 }
80 }
81
82 bool
83 is_ldst(opcode c)
84 {
85 switch (BPF_CLASS (c))
86 {
87 case BPF_LDX:
88 case BPF_ST:
89 case BPF_STX:
90 return true;
91 default:
92 return false;
93 }
94 }
95
96 bool
97 is_binary(opcode code)
98 {
99 if (BPF_CLASS (code) != BPF_ALU64)
100 return false;
101 switch (BPF_OP (code))
102 {
103 case BPF_ADD:
104 case BPF_SUB:
105 case BPF_AND:
106 case BPF_OR:
107 case BPF_LSH:
108 case BPF_RSH:
109 case BPF_XOR:
110 case BPF_MUL:
111 case BPF_ARSH:
112 case BPF_DIV:
113 case BPF_MOD:
114 return true;
115 default:
116 return false;
117 }
118 }
119
120 bool
121 is_commutative(opcode code)
122 {
123 if (BPF_CLASS (code) != BPF_ALU64)
124 return false;
125 switch (BPF_OP (code))
126 {
127 case BPF_ADD:
128 case BPF_AND:
129 case BPF_OR:
130 case BPF_XOR:
131 case BPF_MUL:
132 return true;
133 default:
134 return false;
135 }
136 }
137
/* Various functions for eBPF helper lookup: */

// Bidirectional lookup tables between helper ids and helper names,
// populated by init_bpf_helper_tables().
std::map<unsigned, const char *> bpf_func_name_map;
std::map<std::string, bpf_func_id> bpf_func_id_map;
142
/* PR23829: On older kernels, bpf.h does not define __BPF_FUNC_MAPPER.
   As a fallback, use the *earliest* __BPF_FUNC_MAPPER, so stapbpf
   will not try helpers that only exist on subsequent kernels.

   TODO: This isn't perfect since even older kernels don't have
   some of these helpers.

   XXX: Note the build limitation in that SystemTap must be compiled
   against a recent kernel to be able to use the helpers from that
   kernel. That's also the case when building against recent bpf.h
   with __BPF_FUNC_MAPPER, so this workaround is not the source of the
   problem. */
/* Each FN(x) entry is expanded against the BPF_FUNC_x enumerators
   from bpf.h (see init_bpf_helper_tables), so every name listed here
   must be a helper that bpf.h declares.  The expansion is a
   comma-joined list ending in a trailing comma; users terminate it
   themselves (e.g. with "(void)0").  */
#ifndef __BPF_FUNC_MAPPER
#define __BPF_FUNC_MAPPER(FN) \
  FN(unspec), \
  FN(map_lookup_elem), \
  FN(map_update_elem), \
  FN(map_delete_elem), \
  FN(probe_read), \
  FN(ktime_get_ns), \
  FN(trace_printk), \
  FN(get_prandom_u32), \
  FN(get_smp_processor_id), \
  FN(skb_store_bytes), \
  FN(l3_csum_replace), \
  FN(l4_csum_replace), \
  FN(tail_call), \
  FN(clone_redirect), \
  FN(get_current_pid_tgid), \
  FN(get_current_uid_gid), \
  FN(get_current_comm), \
  FN(get_cgroup_classid), \
  FN(skb_vlan_push), \
  FN(skb_vlan_pop), \
  FN(skb_get_tunnel_key), \
  FN(skb_set_tunnel_key), \
  FN(perf_event_read), \
  FN(redirect), \
  FN(get_route_realm), \
  FN(perf_event_output), \
  FN(skb_load_bytes), \
  FN(get_stackid), \
  FN(csum_diff), \
  FN(skb_get_tunnel_opt), \
  FN(skb_set_tunnel_opt), \
  FN(skb_change_proto), \
  FN(skb_change_type), \
  FN(skb_under_cgroup), \
  FN(get_hash_recalc), \
  FN(get_current_task), \
  FN(probe_write_user), \
  FN(current_task_under_cgroup), \
  FN(skb_change_tail), \
  FN(skb_pull_data), \
  FN(csum_update), \
  FN(set_hash_invalid), \

#endif
201
202 void
203 init_bpf_helper_tables ()
204 {
205 #define __BPF_SET_FUNC_NAME(x) bpf_func_name_map[BPF_FUNC_ ## x] = #x
206 #define __BPF_SET_FUNC_ID(x) bpf_func_id_map[#x] = BPF_FUNC_ ## x
207 __BPF_FUNC_MAPPER(__BPF_SET_FUNC_NAME)
208 __STAPBPF_FUNC_MAPPER(__BPF_SET_FUNC_NAME)
209 __BPF_FUNC_MAPPER(__BPF_SET_FUNC_ID)
210 __STAPBPF_FUNC_MAPPER(__BPF_SET_FUNC_ID)
211 (void)0;
212 }
213
214 const char *
215 bpf_function_name (unsigned id)
216 {
217 if (bpf_func_name_map.count(id) != 0)
218 return bpf_func_name_map[id];
219 return NULL;
220 }
221
222 bpf_func_id
223 bpf_function_id (const std::string& name)
224 {
225 if (bpf_func_id_map.count(name) != 0)
226 return bpf_func_id_map[name];
227 return __BPF_FUNC_MAX_ID;
228 }
229
// Number of argument registers helper ID consumes; used to mark
// r1..rN live at call sites (see insn::mark_uses).
unsigned
bpf_function_nargs (unsigned id)
{
  // ??? generalize to all bpf functions
  switch (id)
    {
    case BPF_FUNC_map_lookup_elem: return 2;
    case BPF_FUNC_map_update_elem: return 4;
    case BPF_FUNC_map_delete_elem: return 2;
    case BPF_FUNC_probe_read: return 3;
    case BPF_FUNC_ktime_get_ns: return 0;
    case BPF_FUNC_trace_printk: return 5;
    case BPF_FUNC_get_prandom_u32: return 0;
    case BPF_FUNC_get_smp_processor_id: return 0;
    case BPF_FUNC_get_current_pid_tgid: return 0;
    case BPF_FUNC_get_current_uid_gid: return 0;
    case BPF_FUNC_get_current_comm: return 2;
    case BPF_FUNC_perf_event_read: return 2;
    case BPF_FUNC_perf_event_output: return 5;
    // Unknown helpers conservatively claim all five argument
    // registers (r1-r5).
    default: return 5;
    }
}
252
253
// Record in S (with value V) the registers this instruction writes;
// used by liveness analysis.
void
insn::mark_sets(bitset::set1_ref &s, bool v) const
{
  if (is_call())
    {
      // Return value and call-clobbered registers.
      for (unsigned i = BPF_REG_0; i <= BPF_REG_5; ++i)
        s.set(i, v);
    }
  else if (dest)
    s.set(dest->reg(), v);
}
266
// Record in S (with value V) the registers this instruction reads.
void
insn::mark_uses(bitset::set1_ref &s, bool v) const
{
  if (is_call())
    {
      // Calls read their first 'off' argument registers; for calls,
      // 'off' holds the arg count (see mk_call).
      unsigned n = off;
      for (unsigned i = 0; i < n; ++i)
        s.set(BPF_REG_1 + i, v);
    }
  else if (code == (BPF_JMP | BPF_EXIT))
    // Exit consumes the return-value register.
    s.set(BPF_REG_0, v);
  else
    {
      if (src0 && src0->is_reg())
        s.set(src0->reg(), v);
      if (src1 && src1->is_reg())
        s.set(src1->reg(), v);
    }
}
286
287 static const char *
288 opcode_name(opcode op)
289 {
290 const char *opn;
291
292 switch (op)
293 {
294 case BPF_LDX | BPF_MEM | BPF_B: opn = "ldxb"; break;
295 case BPF_LDX | BPF_MEM | BPF_H: opn = "ldxh"; break;
296 case BPF_LDX | BPF_MEM | BPF_W: opn = "ldxw"; break;
297 case BPF_LDX | BPF_MEM | BPF_DW: opn = "ldx"; break;
298
299 case BPF_STX | BPF_MEM | BPF_B: opn = "stxb"; break;
300 case BPF_STX | BPF_MEM | BPF_H: opn = "stxh"; break;
301 case BPF_STX | BPF_MEM | BPF_W: opn = "stxw"; break;
302 case BPF_STX | BPF_MEM | BPF_DW: opn = "stx"; break;
303
304 case BPF_ST | BPF_MEM | BPF_B: opn = "stkb"; break;
305 case BPF_ST | BPF_MEM | BPF_H: opn = "stkh"; break;
306 case BPF_ST | BPF_MEM | BPF_W: opn = "stkw"; break;
307 case BPF_ST | BPF_MEM | BPF_DW: opn = "stk"; break;
308
309 case BPF_ALU64 | BPF_ADD | BPF_X: opn = "addx"; break;
310 case BPF_ALU64 | BPF_ADD | BPF_K: opn = "addk"; break;
311 case BPF_ALU64 | BPF_SUB | BPF_X: opn = "subx"; break;
312 case BPF_ALU64 | BPF_SUB | BPF_K: opn = "subk"; break;
313 case BPF_ALU64 | BPF_AND | BPF_X: opn = "andx"; break;
314 case BPF_ALU64 | BPF_AND | BPF_K: opn = "andk"; break;
315 case BPF_ALU64 | BPF_OR | BPF_X: opn = "orx"; break;
316 case BPF_ALU64 | BPF_OR | BPF_K: opn = "ork"; break;
317 case BPF_ALU64 | BPF_LSH | BPF_X: opn = "lshx"; break;
318 case BPF_ALU64 | BPF_LSH | BPF_K: opn = "lshk"; break;
319 case BPF_ALU64 | BPF_RSH | BPF_X: opn = "rshx"; break;
320 case BPF_ALU64 | BPF_RSH | BPF_K: opn = "rshk"; break;
321 case BPF_ALU64 | BPF_XOR | BPF_X: opn = "xorx"; break;
322 case BPF_ALU64 | BPF_XOR | BPF_K: opn = "xork"; break;
323 case BPF_ALU64 | BPF_MUL | BPF_X: opn = "mulx"; break;
324 case BPF_ALU64 | BPF_MUL | BPF_K: opn = "mulk"; break;
325 case BPF_ALU64 | BPF_MOV | BPF_X: opn = "movx"; break;
326 case BPF_ALU64 | BPF_MOV | BPF_K: opn = "movk"; break;
327 case BPF_ALU64 | BPF_ARSH | BPF_X: opn = "arshx"; break;
328 case BPF_ALU64 | BPF_ARSH | BPF_K: opn = "arshk"; break;
329 case BPF_ALU64 | BPF_DIV | BPF_X: opn = "divx"; break;
330 case BPF_ALU64 | BPF_DIV | BPF_K: opn = "divk"; break;
331 case BPF_ALU64 | BPF_MOD | BPF_X: opn = "modx"; break;
332 case BPF_ALU64 | BPF_MOD | BPF_K: opn = "modk"; break;
333 case BPF_ALU64 | BPF_NEG: opn = "negx"; break;
334
335 case BPF_ALU | BPF_MOV | BPF_X: opn = "movwx"; break;
336 case BPF_ALU | BPF_MOV | BPF_K: opn = "movwk"; break;
337
338 case BPF_LD | BPF_IMM | BPF_DW: opn = "movdk"; break;
339 case BPF_LD_MAP: opn = "movmap"; break;
340
341 case BPF_JMP | BPF_CALL: opn = "call"; break;
342 case BPF_JMP | BPF_CALL | BPF_X: opn = "tcall"; break;
343 case BPF_JMP | BPF_EXIT: opn = "exit"; break;
344
345 case BPF_JMP | BPF_JA: opn = "jmp"; break;
346 case BPF_JMP | BPF_JEQ | BPF_X: opn = "jeqx"; break;
347 case BPF_JMP | BPF_JEQ | BPF_K: opn = "jeqk"; break;
348 case BPF_JMP | BPF_JNE | BPF_X: opn = "jnex"; break;
349 case BPF_JMP | BPF_JNE | BPF_K: opn = "jnek"; break;
350 case BPF_JMP | BPF_JGT | BPF_X: opn = "jugtx"; break;
351 case BPF_JMP | BPF_JGT | BPF_K: opn = "jugtk"; break;
352 case BPF_JMP | BPF_JGE | BPF_X: opn = "jugex"; break;
353 case BPF_JMP | BPF_JGE | BPF_K: opn = "jugek"; break;
354 case BPF_JMP | BPF_JSGT | BPF_X: opn = "jsgtx"; break;
355 case BPF_JMP | BPF_JSGT | BPF_K: opn = "jsgtk"; break;
356 case BPF_JMP | BPF_JSGE | BPF_X: opn = "jsgex"; break;
357 case BPF_JMP | BPF_JSGE | BPF_K: opn = "jsgek"; break;
358 case BPF_JMP | BPF_JSET | BPF_X: opn = "jsetx"; break;
359 case BPF_JMP | BPF_JSET | BPF_K: opn = "jsetk"; break;
360
361 default:
362 opn = "<BUG:unknown opcode>";
363 }
364
365 return opn;
366 }
367
// Render one instruction in the assembly-like textual form used by
// block::print, formatted according to the opcode's operand layout.
std::ostream &
insn::print(std::ostream &o) const
{
#ifdef DEBUG_CODEGEN
  // Prefix the codegen annotation attached by insn_inserter::new_insn.
  if (note != "")
    o << "{" << note << "} ";
#endif
  const char *opn = opcode_name (code);

  switch (code)
    {
    // Loads: "op dest,[base+off]".
    case BPF_LDX | BPF_MEM | BPF_B:
    case BPF_LDX | BPF_MEM | BPF_H:
    case BPF_LDX | BPF_MEM | BPF_W:
    case BPF_LDX | BPF_MEM | BPF_DW:
      return o << opn << "\t" << *dest
               << ",[" << *src1
               << showpos << off << noshowpos << "]";

    // Stores: "op [base+off],src" (register or immediate source).
    case BPF_STX | BPF_MEM | BPF_B:
    case BPF_STX | BPF_MEM | BPF_H:
    case BPF_STX | BPF_MEM | BPF_W:
    case BPF_STX | BPF_MEM | BPF_DW:
    case BPF_ST | BPF_MEM | BPF_B:
    case BPF_ST | BPF_MEM | BPF_H:
    case BPF_ST | BPF_MEM | BPF_W:
    case BPF_ST | BPF_MEM | BPF_DW:
      return o << opn << "\t[" << *src0
               << showpos << off << noshowpos
               << "]," << *src1;

    // Moves and constant/map loads: "op dest,src".
    case BPF_ALU | BPF_MOV | BPF_X:
    case BPF_ALU | BPF_MOV | BPF_K:
    case BPF_ALU64 | BPF_MOV | BPF_X:
    case BPF_ALU64 | BPF_MOV | BPF_K:
    case BPF_LD | BPF_IMM | BPF_DW:
    case BPF_LD_MAP:
      return o << opn << "\t" << *dest << "," << *src1;

    // Unary negate: "op dest,src0".
    case BPF_ALU64 | BPF_NEG:
      return o << opn << "\t" << *dest << "," << *src0;

    // Binary ALU: "op dest,src0,src1".
    case BPF_ALU64 | BPF_ADD | BPF_X:
    case BPF_ALU64 | BPF_ADD | BPF_K:
    case BPF_ALU64 | BPF_SUB | BPF_X:
    case BPF_ALU64 | BPF_SUB | BPF_K:
    case BPF_ALU64 | BPF_AND | BPF_X:
    case BPF_ALU64 | BPF_AND | BPF_K:
    case BPF_ALU64 | BPF_OR | BPF_X:
    case BPF_ALU64 | BPF_OR | BPF_K:
    case BPF_ALU64 | BPF_LSH | BPF_X:
    case BPF_ALU64 | BPF_LSH | BPF_K:
    case BPF_ALU64 | BPF_RSH | BPF_X:
    case BPF_ALU64 | BPF_RSH | BPF_K:
    case BPF_ALU64 | BPF_XOR | BPF_X:
    case BPF_ALU64 | BPF_XOR | BPF_K:
    case BPF_ALU64 | BPF_MUL | BPF_X:
    case BPF_ALU64 | BPF_MUL | BPF_K:
    case BPF_ALU64 | BPF_ARSH | BPF_X:
    case BPF_ALU64 | BPF_ARSH | BPF_K:
    case BPF_ALU64 | BPF_DIV | BPF_X:
    case BPF_ALU64 | BPF_DIV | BPF_K:
    case BPF_ALU64 | BPF_MOD | BPF_K:
    case BPF_ALU64 | BPF_MOD | BPF_X:
      return o << opn << "\t" << *dest << "," << *src0 << "," << *src1;

    // Calls: show the helper name when known, else the raw id; for
    // calls 'off' holds the argument count.
    case BPF_JMP | BPF_CALL:
    case BPF_JMP | BPF_CALL | BPF_X:
      o << opn << "\t";
      if (const char *name = bpf_function_name(src1->imm()))
        o << name;
      else
        o << *src1;
      return o << "," << off;

    // No printable operands; branch targets live on CFG edges.
    case BPF_JMP | BPF_EXIT:
    case BPF_JMP | BPF_JA:
      return o << opn;

    // Conditional branches: "op src0,src1".
    case BPF_JMP | BPF_JEQ | BPF_X:
    case BPF_JMP | BPF_JEQ | BPF_K:
    case BPF_JMP | BPF_JNE | BPF_X:
    case BPF_JMP | BPF_JNE | BPF_K:
    case BPF_JMP | BPF_JGT | BPF_X:
    case BPF_JMP | BPF_JGT | BPF_K:
    case BPF_JMP | BPF_JGE | BPF_X:
    case BPF_JMP | BPF_JGE | BPF_K:
    case BPF_JMP | BPF_JSGT | BPF_X:
    case BPF_JMP | BPF_JSGT | BPF_K:
    case BPF_JMP | BPF_JSGE | BPF_X:
    case BPF_JMP | BPF_JSGE | BPF_K:
    case BPF_JMP | BPF_JSET | BPF_X:
    case BPF_JMP | BPF_JSET | BPF_K:
      return o << opn << "\t" << *src0 << "," << *src1;

    default:
      return o << "<BUG:unknown instruction format>";
    }
}
467
// Create a CFG edge P -> N and register it in N's predecessor set.
// (The caller stores the edge in P's taken/fallthru slot.)
edge::edge(block *p, block *n)
  : prev(p), next(n)
{
  n->prevs.insert (this);
}
473
// Detach this edge: remove it from the successor's predecessor set
// and null out whichever of the source block's out-edge slots
// pointed at it.
edge::~edge()
{
  next->prevs.erase (this);
  if (prev->taken == this)
    prev->taken = NULL;
  if (prev->fallthru == this)
    prev->fallthru = NULL;
}
482
// Retarget this edge at block N, keeping both successors' predecessor
// sets consistent.
void
edge::redirect_next(block *n)
{
  next->prevs.erase (this);
  next = n;
  n->prevs.insert (this);
}
490
// Construct an empty basic block with id I: no instructions, no
// out-edges yet.
block::block(int i)
  : first(NULL), last(NULL), taken(NULL), fallthru(NULL), id(i)
{ }
494
// Destroy the block: free its instruction list, then its owned
// out-edges (whose destructors unlink them from neighboring blocks).
block::~block()
{
  for (insn *n, *i = first; i ; i = n)
    {
      n = i->next;
      delete i;
    }
  delete taken;
  delete fallthru;
}
505
506 block *
507 block::is_forwarder() const
508 {
509 if (first == NULL)
510 {
511 if (fallthru)
512 return fallthru->next;
513 }
514 else if (first == last && first->code == (BPF_JMP | BPF_JA))
515 return taken->next;
516 return NULL;
517 }
518
519 void
520 block::print(ostream &o) const
521 {
522 if (prevs.empty ())
523 o << "\t[prevs: entry]\n";
524 else
525 {
526 o << "\t[prevs:";
527 for (edge_set::const_iterator i = prevs.begin(); i != prevs.end(); ++i)
528 o << ' ' << (*i)->prev->id;
529 o << "]\n";
530 }
531
532 o << id << ':' << endl;
533 for (insn *i = first; i != NULL; i = i->next)
534 o << '\t' << *i << endl;
535
536 if (taken)
537 o << "\t[taken: " << taken->next->id << "]" << endl;
538 if (fallthru)
539 o << "\t[fallthru: " << fallthru->next->id << "]" << endl;
540 else if (!taken)
541 o << "\t[end]" << endl;
542 }
543
// Allocate a fresh empty instruction and hand it to this inserter's
// insert() for placement in the block.
insn *
insn_inserter::new_insn()
{
  insn *n = new insn;
#ifdef DEBUG_CODEGEN
  // Tag the insn with the innermost pending codegen annotation.
  if (!notes.empty())
    n->note = notes.top();
  else
    n->note = "";
#endif
  insert(n);
  return n;
}
557
// Link N into the instruction list immediately before insn I (which
// must exist).  b->last cannot change since I stays after N.
void
insn_before_inserter::insert(insn *n)
{
  assert(i != NULL);
  insn *p = i->prev;
  i->prev = n;
  n->prev = p;
  n->next = i;
  if (p == NULL)
    b->first = n;  // N becomes the new head of the block
  else
    p->next = n;
}
571
// Link P into the instruction list immediately after insn I, or as
// the block's sole instruction when it is empty.  I is then advanced
// to P so consecutive insertions emit in program order.
void
insn_after_inserter::insert(insn *p)
{
  if (i == NULL)
    {
      // Empty block: P becomes both first and last.
      assert(b->first == NULL && b->last == NULL);
      b->first = b->last = p;
    }
  else
    {
      insn *n = i->next;
      i->next = p;
      p->prev = i;
      p->next = n;
      if (n == NULL)
        b->last = p;  // appended at the tail
      else
        n->prev = p;
    }
  i = p;
}
593
// Construct a program for TARGET, pre-building one value object per
// hard register so lookup_reg can hand out stable pointers.
program::program(enum bpf_target target)
  : target(target), hardreg_vals(MAX_BPF_REG), max_tmp_space(0)
{
  for (unsigned i = 0; i < MAX_BPF_REG; ++i)
    hardreg_vals[i] = value::mk_hardreg(i);
}
600
// Destructor deliberately frees nothing; see the comment below.
program::~program()
{
  // XXX We need to suffer a memory leak here, as blocks / edges are
  // tightly interlinked structures, and their dtors like to invoke
  // functions on each other. This will need a rethink, as this is
  // the type of problem domain where a garbage collected runtime
  // shines, and most other languages don't.
#if 0
  for (auto i = blocks.begin (); i != blocks.end (); ++i)
    delete *i;
  for (auto i = reg_vals.begin (); i != reg_vals.end (); ++i)
    delete *i;
  for (auto i = imm_map.begin (); i != imm_map.end (); ++i)
    delete i->second;
  for (auto i = str_map.begin (); i != str_map.end (); ++i)
    delete i->second;
#endif
}
619
620 block *
621 program::new_block ()
622 {
623 block *r = new block(blocks.size ());
624 blocks.push_back (r);
625 return r;
626 }
627
628 value *
629 program::lookup_reg(regno r)
630 {
631 if (r < MAX_BPF_REG)
632 return &hardreg_vals[r];
633 else
634 return reg_vals[r - MAX_BPF_REG];
635 }
636
637 value *
638 program::new_reg()
639 {
640 regno r = max_reg();
641 value *v = new value(value::mk_reg(r));
642 reg_vals.push_back(v);
643 return v;
644 }
645
646 value *
647 program::new_imm(int64_t i)
648 {
649 auto old = imm_map.find(i);
650 if (old != imm_map.end())
651 return old->second;
652
653 value *v = new value(value::mk_imm(i));
654 auto ok = imm_map.insert(std::pair<int64_t, value *>(i, v));
655 assert(ok.second);
656 return v;
657 }
658
659 value *
660 program::new_str(std::string str, bool format_str)
661 {
662 std::unordered_map<std::string, value *>& m = str_map;
663 if (format_str) m = format_map;
664
665 auto old = m.find(str);
666 if (old != m.end())
667 return old->second;
668
669 value *v = new value(value::mk_str(str, format_str));
670 auto ok = m.insert(std::pair<std::string, value *>(str, v));
671 assert(ok.second);
672 return v;
673 }
674
// Emit DEST = *(BASE + OFF), a memory load of size SZ (BPF_B/H/W/DW).
void
program::mk_ld(insn_inserter &ins, int sz, value *dest, value *base, int off)
{
  insn *i = ins.new_insn();
  i->code = BPF_LDX | BPF_MEM | sz;
  i->off = off;
  i->dest = dest;
  i->src1 = base;  // base-address register
}
684
// Emit *(BASE + OFF) = SRC, a store of size SZ.  Immediate sources
// use the BPF_ST encoding; register sources use BPF_STX.
void
program::mk_st(insn_inserter &ins, int sz, value *base, int off, value *src)
{
  insn *i = ins.new_insn();
  i->code = (src->is_imm() ? BPF_ST : BPF_STX) | BPF_MEM | sz;
  i->off = off;
  i->src0 = base;
  i->src1 = src;
}
694
// Emit DEST = S0 <OP> S1 as a 64-bit ALU instruction, after two
// operand-shaping tweaks.
void
program::mk_binary(insn_inserter &ins, opcode op, value *dest,
                   value *s0, value *s1)
{
  if (op == BPF_SUB)
    {
      // 0 - x is emitted as a negate instead.
      if (s0->is_imm() && s0->imm() == 0)
        {
          mk_unary(ins, BPF_NEG, dest, s1);
          return;
        }
    }
  else if (is_commutative(op)
           && ((s1->is_reg() && !s0->is_reg()) || dest == s1))
    // For commutative ops, keep a sole register operand in s0 (so an
    // immediate lands in the BPF_K slot) and avoid dest aliasing s1.
    // NOTE(review): the aliasing swap presumably protects the later
    // two-address lowering (dest = s0; dest op= s1) from clobbering
    // s1 — confirm.
    std::swap (s1, s0);

  insn *i = ins.new_insn();
  // BPF_K form when the (post-swap) second operand is an immediate.
  i->code = BPF_ALU64 | op | (s1->is_imm() ? BPF_K : BPF_X);
  i->dest = dest;
  i->src0 = s0;
  i->src1 = s1;
}
717
// Emit DEST = -SRC.  BPF negation operates in place, so SRC is first
// copied into DEST when they differ.
void
program::mk_unary(insn_inserter &ins, opcode op, value *dest, value *src)
{
  assert (op == BPF_NEG); // XXX: BPF_NEG is the only unary operator so far.

  if (dest != src) // src is not used for BPF_NEG. BPF negates in-place.
    mk_mov(ins, dest, src);

  insn *i = ins.new_insn();
  i->code = BPF_ALU64 | op; // BPF_X is not used for BPF_NEG.
  i->dest = dest;
  i->src0 = dest; // XXX: dest as an ersatz 'source'.
}
731
// Emit DEST = SRC, choosing the smallest encoding for immediates.
void
program::mk_mov(insn_inserter &ins, value *dest, value *src)
{
  if (dest == src)
    return;  // no-op move

  opcode code = BPF_ALU64 | BPF_MOV | BPF_X;
  if (src->is_imm())
    {
      int64_t i = src->imm();
      if (i == (int32_t)i)
        // Fits a sign-extended 32-bit immediate.
        code = BPF_ALU64 | BPF_MOV | BPF_K;
      else if (i == (uint32_t)i)
        // Fits unsigned 32 bits: the 32-bit move zero-extends to 64.
        code = BPF_ALU | BPF_MOV | BPF_K;
      else
        // Needs the full 64-bit immediate-load form.
        code = BPF_LD | BPF_IMM | BPF_DW;
    }

  insn *i = ins.new_insn();
  i->code = code;
  i->dest = dest;
  i->src1 = src;
}
755
// Emit an unconditional jump to DEST and record the corresponding
// taken edge on the current block.
void
program::mk_jmp(insn_inserter &ins, block *dest)
{
  insn *i = ins.new_insn();
  i->code = BPF_JMP | BPF_JA;

  block *b = ins.get_block();
  b->taken = new edge(b, dest);
}
765
// Emit a call to helper ID.  The helper id is carried as the src1
// immediate; NARGS is stashed in 'off' for liveness (mark_uses) and
// printing.
void
program::mk_call(insn_inserter &ins, enum bpf_func_id id, unsigned nargs)
{
  insn *i = ins.new_insn();
  i->code = BPF_JMP | BPF_CALL;
  i->src1 = new_imm((int)id);
  i->off = nargs;
}
774
// Emit a program exit (which consumes r0 as the return value; see
// insn::mark_uses).
void
program::mk_exit(insn_inserter &ins)
{
  insn *i = ins.new_insn();
  i->code = BPF_JMP | BPF_EXIT;
}
781
// Emit a conditional branch comparing S0 against S1: control goes to
// block T when condition C holds, to F otherwise.  Also creates the
// taken/fallthru CFG edges on the current block.
void
program::mk_jcond(insn_inserter &ins, condition c, value *s0, value *s1,
                  block *t, block *f)
{
  bool inv = false;
  opcode code;

  // Canonicalize: keep a register operand in s0 (so an immediate
  // lands in s1 / the BPF_K slot), mirroring the condition to
  // compensate for the swap.
  if (s1->is_reg() && !s0->is_reg())
    {
      std::swap (s1, s0);
      switch (c)
        {
        case EQ: break;    // symmetric
        case NE: break;    // symmetric
        case TEST: break;  // s0 & s1 — symmetric
        case LT: c = GT; break;
        case LE: c = GE; break;
        case GT: c = LT; break;
        case GE: c = LE; break;
        case LTU: c = GTU; break;
        case LEU: c = GEU; break;
        case GTU: c = LTU; break;
        case GEU: c = LEU; break;
        default: abort();
        }
    }

  // Map onto the available BPF compare opcodes.  The less-than
  // family has no direct opcode here, so e.g. LE is encoded as the
  // opposite JSGT with the branch targets exchanged (inv).
  switch (c)
    {
    case EQ:
      code = BPF_JEQ;
      break;
    case NE:
      code = BPF_JNE;
      break;
    case LE:
      inv = true;
      /* Fallthrough */
    case GT:
      code = BPF_JSGT;
      break;
    case LT:
      inv = true;
      /* Fallthrough */
    case GE:
      code = BPF_JSGE;
      break;
    case LEU:
      inv = true;
      /* Fallthrough */
    case GTU:
      code = BPF_JGT;
      break;
    case LTU:
      inv = true;
      /* Fallthrough */
    case GEU:
      code = BPF_JGE;
      break;
    case TEST:
      code = BPF_JSET;
      break;
    default:
      abort ();
    }

  // Inverted conditions branch to the "false" block instead.
  if (inv)
    std::swap (t, f);

  block *b = ins.get_block();
  b->taken = new edge(b, t);
  b->fallthru = new edge(b, f);

  insn *i = ins.new_insn();
  i->code = BPF_JMP | code | (s1->is_imm() ? BPF_K : BPF_X);
  i->src0 = s0;
  i->src1 = s1;
}
860
// Emit DEST = map[SRC], a BPF_LD_MAP pseudo-instruction whose
// immediate operand selects which map to reference.
void
program::load_map(insn_inserter &ins, value *dest, int src)
{
  assert (src >= 0); // PR23476: Ensure a stray stats reference doesn't slip through.
  insn *i = ins.new_insn();
  i->code = BPF_LD_MAP;
  i->dest = dest;
  i->src1 = new_imm(src);
}
870
871 void
872 program::print(ostream &o) const
873 {
874 for (unsigned n = blocks.size(), i = 0; i < n; ++i)
875 {
876 block *b = blocks[i];
877 if (b)
878 o << *b << endl;
879 }
880 }
881 } // namespace bpf
This page took 0.069512 seconds and 5 git commands to generate.