forked from advanced-microcode-patching/shiva
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathshiva_module.c
2861 lines (2694 loc) · 98.5 KB
/
shiva_module.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#include "shiva.h"
#include "shiva_debug.h"
#include "modules/include/shiva_module.h"
#include <sys/mman.h>
/*
 * Build a bitmask of the low n bits, e.g. RELOC_MASK(26) == 0x3ffffff.
 * The argument is parenthesized so expression arguments (e.g. ternaries)
 * expand correctly.
 */
#define RELOC_MASK(n) ((1U << (n)) - 1)
/*
 * The purpose of this code is to take a relocatable object,
 * and turn it into a runtime executable module. This means
 * that we must organize SHF_ALLOC and SHF_ALLOC|SHF_EXECINSTR sections into
 * the text segment, and SHF_WRITE sections into the data segment.
 */
#ifdef __x86_64__
/*
 * Our custom PLT stubs are a simple IP relative indirect
 * JMP into the global offset table.
 * i.e. jmp *0x0(%rip)
 * NOTE: Our linker uses strict linking.
 */
uint8_t plt_stub[6] = "\xff\x25\x00\x00\x00\x00";
#elif __aarch64__
/*
 * Our PLT stub requires 8 bytes in ARM64 assembly.
 */
uint8_t plt_stub[8] = "\x11\x00\x00\x58" /* ldr x17, got_entry_mem */
		      "\x20\x02\x1f\xd6"; /* br x17 */
#endif
static bool module_has_transforms(struct shiva_module *);
static bool get_section_mapping(struct shiva_module *, char *, struct shiva_module_section_mapping *);
static bool enable_post_linker(struct shiva_module *);
/*
* Returns the name of the ELF section that the symbol lives in, within the
* loaded ET_REL module.
*/
static char *
module_symbol_shndx_str(struct shiva_module *linker, struct elf_symbol *symbol)
{
struct elf_section *section = shiva_malloc(sizeof(*section));
uint16_t shndx = symbol->shndx;
shiva_debug("SHNDX: %d\n", shndx);
if (shndx == SHN_COMMON) {
shiva_debug("Symbols SHN_COMMON, we will assume .bss data\n");
return ".bss";
}
/*
* What section does our symbol live in?
*/
if (elf_section_by_index(&linker->elfobj, shndx, section)
== false)
return NULL;
return section->name;
}
/*
 * Transfer control to the loaded module by calling its entry point.
 * 'entry' is the absolute runtime address of the module entry function,
 * which receives the shiva context as its single argument.
 */
static void
transfer_to_module(struct shiva_ctx *ctx, uint64_t entry)
{
	void (*fn)(void *arg) = (void (*)(void *))entry;

	/*
	 * Plain call: returning the value of a void expression from a
	 * void function is an ISO C constraint violation (C99 6.8.6.4).
	 */
	fn(ctx);
}
static bool
install_aarch64_call26_patch(struct shiva_ctx *ctx, struct shiva_module *linker,
struct shiva_branch_site *e, struct elf_symbol *patch_symbol,
struct shiva_transform *transform)
{
/*
* The patch_symbol->value will be a symbol value found within the patch
* module, containing an offset into the text section which is the first
* section within a Shiva modules text segment.
*/
uint64_t target_vaddr = patch_symbol->value + linker->text_vaddr;
uint32_t insn_bytes = e->o_insn;
uint32_t call_offset;
shiva_error_t error;
bool res;
shiva_debug("patch_symbol->value: %#lx\n", patch_symbol->value);
shiva_debug("transform: %p\n", transform);
/*
* In the event of a transform, we are re-linking the executable to a function
* that has been transformed with a splice, which requires that we don't use
* the patch_symbol.value (Since it will have been moved), instead we use the
* transform->segment_offset.
*/
target_vaddr = (transform == NULL) ? patch_symbol->value + linker->text_vaddr :
linker->text_vaddr + transform->segment_offset;
shiva_debug("target_vaddr is %#lx\n", target_vaddr);
if (transform == NULL) {
if (module_has_transforms(linker) == true) {
shiva_debug("Increasing target vaddr by %zu bytes\n", linker->tf_text_offset);
target_vaddr += linker->tf_text_offset;
} else {
shiva_debug("Module has no transforms\n");
}
}
shiva_debug("PATCHING BRANCH SITE: %#lx\n", e->branch_site);
call_offset = (target_vaddr - ((e->branch_site + ctx->ulexec.base_vaddr))) >> 2;
shiva_debug("target_vaddr: %#lx branch_site: %#lx\n",
target_vaddr, e->branch_site + ctx->ulexec.base_vaddr);
shiva_debug("call_offset: %#lx encoded: %#lx\n", call_offset * 4, call_offset);
shiva_debug("old insn_bytes: %#x\n", insn_bytes);
insn_bytes = (insn_bytes & ~RELOC_MASK(26)) | (call_offset & RELOC_MASK(26));
/*
* XXX
* Technically the shiva_trace API shouldn't be used from within Shiva.
* It's Akin to the Kernel invoking syscalls. Although atleast we aren't
* calling shiva_trace(), but rather one of it's utility functions for
* writing to memory. This won't cause any harm, but it's not congruent
* with the modeled use cases of Shiva trace API which is meant to be invoked
* by modules.
*/
res = shiva_trace_write(ctx, 0, (void *)e->branch_site + ctx->ulexec.base_vaddr,
(void *)&insn_bytes, 4, &error);
if (res == false) {
fprintf(stderr, "sihva_trace_write failed: %s\n", shiva_error_msg(&error));
return false;
}
return true;
}
/*
* XXX does not properly handle xrefs from target executable
* to fully transformed function.
* TODO
*/
/*
 * Patch an AArch64 data cross-reference (adrp + add/ldr/str sequence, or an
 * indirect GOT-based access) in the target executable so that it refers to
 * the patch module's version of the variable.
 *
 * Returns true on success, false on lookup/encoding/write failure.
 */
static bool
install_aarch64_xref_patch(struct shiva_ctx *ctx, struct shiva_module *linker,
    struct shiva_xref_site *e, struct elf_symbol *patch_symbol)
{
	uint32_t n_adrp_insn;
	uint32_t n_add_insn;
	uint32_t n_ldr_insn;	/* NOTE(review): unused in this function */
	uint32_t n_str_insn;	/* NOTE(review): unused in this function */
	int32_t rel_val, xoffset;	/* NOTE(review): xoffset is written but never read */
	/* Runtime address of the adrp instruction being patched. */
	uint64_t rel_addr = e->adrp_site + ctx->ulexec.base_vaddr;
	/* Base address of the segment (text/data/bss) holding the new variable. */
	uint64_t var_segment;
	uint8_t *rel_unit;
	struct elf_section shdr;
	struct shiva_module_section_mapping smap;
	shiva_error_t error;
	bool res;
	char *shdr_name = NULL;

	/* Resolve which module section the patch symbol lives in. */
	if (patch_symbol->shndx == SHN_COMMON) {
		shiva_debug("shndx == SHN_COMMON for var: %s. Assuming it's a .bss\n",
		    patch_symbol->name);
		shdr_name = ".bss";
	} else {
		if (elf_section_by_index(&linker->elfobj, patch_symbol->shndx, &shdr) == false) {
			fprintf(stderr, "Failed to find section index: %d in module: %s\n",
			    patch_symbol->shndx, elf_pathname(&linker->elfobj));
			return false;
		}
		shdr_name = shdr.name;
	}
	/* Map the section name to its runtime segment within the module. */
	if (get_section_mapping(linker, shdr_name, &smap) == false) {
		fprintf(stderr, "Failed to retrieve section data for %s\n", shdr_name);
		return false;
	}
	switch(smap.map_attribute) {
	case LP_SECTION_TEXTSEGMENT:
		shiva_debug("VARSEGMENT(Text): %#lx\n", linker->text_vaddr);
		var_segment = linker->text_vaddr;
		break;
	case LP_SECTION_DATASEGMENT:
		shiva_debug("VARSEGMENT(Data): %#lx\n", linker->data_vaddr);
		var_segment = linker->data_vaddr;
		break;
	case LP_SECTION_BSS_SEGMENT:
		shiva_debug("VARSEGMENT(Bss): %#lx\n", linker->bss_vaddr);
		var_segment = linker->bss_vaddr;
		break;
	default:
		fprintf(stderr, "Unknown section attribute for '%s'\n", shdr_name);
		return false;
	}
	/*
	 * SHIVA_XREF_INDIRECT external linking patch
	 *
	 * An indirect XREF is an indirect access to a variable,
	 * such as a .bss variable most commonly:
	 *
	 * adrp x0, <segment_offset> ; get page aligned address of text + segment_offset
	 * ldr x0, [x0, #pgoff] ; retrieve the address of the .bss variable from the .got
	 * ldr w1, [x0] ; load the .bss variable from memory into w1
	 *
	 * The absolute address to the variable is computed at runtime via the R_AARCH64_RELATIVE
	 * relocations: base + addend
	 *
	 * Our solution is clean:
	 * 1. locate the R_AARCH64_RELATIVE relocation whose r_addend is equal
	 * to the offset of the original .bss variable.
	 * 2. Calculate the offset of the new patch .bss variable from the base of the executable:
	 * new_var_addr - executable_base - 4
	 * 3. Store the offset as the updated r_addend field in the relocation entry
	 *
	 * This is a great example of Cross relocation. Shiva is manipulating LDSO meta-data
	 * to influence the behavior of ld-linux.so. The ld-linux.so rtld will parse the
	 * R_AARCH64_RELATIVE relocations and apply the new offset for the patched version
	 * of the .bss variable.
	 */
	if (e->flags & SHIVA_XREF_F_INDIRECT) {
		int i;
		Elf64_Rela *rela;
		size_t relasz;
		uint64_t rela_ptr;
		uint64_t var_addr = patch_symbol->value + var_segment;

		/* Rebase the recorded GOT slot pointer into the live process image. */
		e->got = (uint64_t *)((uint64_t)e->got + ctx->ulexec.base_vaddr);
		if (shiva_target_dynamic_get(ctx, DT_RELASZ, &relasz) == false) {
			fprintf(stderr, "shiva_target_dynamic_get(%p, DT_RELASZ, ...) failed\n",
			    ctx);
			return false;
		}
		if (shiva_target_dynamic_get(ctx, DT_RELA, &rela_ptr) == false) {
			fprintf(stderr, "shiva_target_dynamic_set(%p, DT_RELA, ...) failed\n",
			    ctx);
			return false;
		}
		rela_ptr += ctx->ulexec.base_vaddr;
		rela = (void *)rela_ptr;
		/* Scan rela.dyn for the RELATIVE entry whose addend matches the old offset. */
		for (i = 0; i < relasz / sizeof(Elf64_Rela); i++) {
			if (rela[i].r_addend == *(e->got)) {
				uint64_t relval = var_addr - ctx->ulexec.base_vaddr - 4;

				shiva_debug("Found RELATIVE rela.dyn relocation entry for %s\n",
				    patch_symbol->name);
				shiva_debug("Patching rela[%d].r_addend with %#lx\n", i, relval);
				res = shiva_trace_write(ctx, 0, (void *)&rela[i].r_addend, (void *)&relval,
				    8, &error);
				if (res == false) {
					fprintf(stderr, "shiva_trace_write failed: %s\n",
					    shiva_error_msg(&error));
					return false;
				}
				/*
				 * XXX - We do not support ELF32 at the moment, but if we did
				 * the Elf32_Rel doesn't contain an r_addend field. The rtld
				 * retrieves it from the relocation unit. We overwrite the
				 * addend (Pointed to by e->got) with the correct offset to
				 * the global object (the symbol), i.e. a variable in the .bss.
				 * This call to shiva_trace_write() isn't necessary on 64bit.
				 */
				res = shiva_trace_write(ctx, 0, (void *)e->got, (void *)&relval,
				    8, &error);
				if (res == false) {
					fprintf(stderr, "shiva_trace_write failed: %s\n",
					    shiva_error_msg(&error));
					return false;
				}
			}
		}
		return true;
	}
	/* Direct XREF: re-encode the adrp (and follower) instructions in place. */
	shiva_debug("var_segment: %#lx base_vaddr: %#lx\n", var_segment, ctx->ulexec.base_vaddr);
	/* Page-delta between the xref site and the new variable's page. */
	xoffset = rel_val = (int32_t)(ELF_PAGESTART(patch_symbol->value + var_segment) - ELF_PAGESTART(rel_addr));
	rel_val >>= 12;
	/* Rebuild the adrp immediate: immlo (bits 29-30) and immhi (bits 5-23). */
	n_adrp_insn = e->adrp_o_insn & 0xffffffff;
	n_adrp_insn = (n_adrp_insn & ~((RELOC_MASK (2) << 29) | (RELOC_MASK(19) << 5)))
	    | ((rel_val & RELOC_MASK(2)) << 29) | ((rel_val & (RELOC_MASK(19) << 2)) << 3);
	/*
	 * NOTE(review): SHIVA_XREF_TYPE_ADRP_STR has no case below, so such
	 * sites fall through the switch and return true without being patched
	 * — confirm whether that is intended.
	 */
	switch(e->type) {
	case SHIVA_XREF_TYPE_UNKNOWN:
		return false;
	case SHIVA_XREF_TYPE_ADRP_ADD:
		rel_unit = (uint8_t *)e->adrp_site + ctx->ulexec.base_vaddr; // address of unit we are patching in target ELF executable
		shiva_debug("Installing SHIVA_XREF_TYPE_ADRP_ADD patch at %#lx\n", e->adrp_site + ctx->ulexec.base_vaddr);
		res = shiva_trace_write(ctx, 0, (void *)rel_unit,
		    (void *)&n_adrp_insn, 4, &error);
		if (res == false) {
			fprintf(stderr, "shiva_trace_write failed: %s\n", shiva_error_msg(&error));
			return false;
		}
		/* Low 12 bits of the symbol offset go into the add immediate. */
		rel_val = patch_symbol->value;
		shiva_debug("Add offset: %#lx\n", rel_val);
		n_add_insn = e->next_o_insn;
		n_add_insn = (n_add_insn & ~(RELOC_MASK(12) << 10)) | ((rel_val & RELOC_MASK(12)) << 10);
		rel_unit += sizeof(uint32_t);
		res = shiva_trace_write(ctx, 0, (void *)rel_unit,
		    (void *)&n_add_insn, 4, &error);
		if (res == false) {
			fprintf(stderr, "shiva_trace_write failed: %s\n", shiva_error_msg(&error));
			return false;
		}
		break;
	case SHIVA_XREF_TYPE_ADRP_LDR:
		rel_unit = (uint8_t *)e->adrp_site + ctx->ulexec.base_vaddr;
		rel_val = patch_symbol->value;
		shiva_debug("Installing SHIVA_XREF_TYPE_ADRP_LDR patch at %#lx\n",
		    e->adrp_site + ctx->ulexec.base_vaddr);
		shiva_debug("SHIVA_XREF_TYPE_ADRP_LDR not yet supported\n");
		/*
		 * NOTE(review): assert(true) is a no-op — this was presumably
		 * meant to be assert(false) to trap the unsupported path; as
		 * written the function returns success without patching.
		 */
		assert(true);
		break;
	}
	return true;
}
/*
 * Walk every call site and data cross-reference discovered in the target
 * executable (by shiva_analyze.c) and re-link those that refer to symbols
 * the patch module overrides, so they resolve to the module's versions.
 *
 * AArch64 only; on x86_64 this immediately fails (the early return below
 * makes the remainder of the function unreachable on that architecture).
 *
 * Returns true when all applicable patches were installed.
 */
static bool
apply_external_patch_links(struct shiva_ctx *ctx, struct shiva_module *linker)
{
	struct shiva_transform *transform = NULL;
	struct shiva_transform *tfptr = NULL;	/* transform matching the current callsite, if any */
	shiva_callsite_iterator_t callsites;
	struct shiva_branch_site be, *branch;	/* NOTE(review): 'branch' is unused */
	shiva_xref_iterator_t xrefs;
	struct shiva_xref_site xe;
	shiva_iterator_res_t ires;	/* NOTE(review): unused */
	struct elf_symbol symbol;
	bool res;
	const char *symname = NULL;

#if __x86_64__
	fprintf(stderr, "Cannot apply external patch links on x86_64. Unsupported\n");
	return false;
#endif
	shiva_callsite_iterator_init(ctx, &callsites);
	while (shiva_callsite_iterator_next(&callsites, &be) == SHIVA_ITER_OK) {
		if (be.branch_flags & SHIVA_BRANCH_F_PLTCALL) // TODO: handle PLT call sites instead of skipping them
			continue;
		/*
		 * The callsites were found early on in shiva_analyze.c and
		 * contain every branch instruction within the target ELF.
		 */
		/*
		 * Check the patch object file (Represented by &linker->elfobj) to see
		 * if it contains the same function name within it as the one originally
		 * being called. If so then we relink this call instruction to point to
		 * our new relocated function.
		 *
		 * If transformations are involved, then any calls from say main() to
		 * foo(), are relinked to a newly created version of foo with spliced in
		 * patch code. This source transform function will be called:
		 * __shiva_splice_fn_name_foo() in the patch object. So we must search
		 * for it by the correct name.
		 */
		shiva_debug("tfptr: %p\n", tfptr);
		shiva_debug("Callsite %#lx branches to %#lx\n", be.branch_site, be.target_vaddr);
		symname = be.symbol.name;
		if (module_has_transforms(linker) == true) {
			/* Swap in the splice-function name if this callee is transformed. */
			TAILQ_FOREACH(transform, &linker->tailq.transform_list, _linkage) {
				switch(transform->type) {
				case SHIVA_TRANSFORM_SPLICE_FUNCTION:
					shiva_debug("Comparing %s and %s\n",
					    transform->source_symbol.name +
					    strlen(SHIVA_T_SPLICE_FUNC_ID), be.symbol.name);
					if (strcmp(transform->source_symbol.name +
					    strlen(SHIVA_T_SPLICE_FUNC_ID), be.symbol.name) == 0) {
						symname = transform->source_symbol.name;
						shiva_debug("Transform source found: %s\n", symname);
						tfptr = transform;
					}
					break;
				default:
					break;
				}
			}
		}
		if (elf_symbol_by_name(&linker->elfobj, symname,
		    &symbol) == true) {
			/* Only re-link calls to global functions defined in the module. */
			if (symbol.type != STT_FUNC ||
			    symbol.bind != STB_GLOBAL)
				continue;
#if __aarch64__
			shiva_debug("Installing patch offset on target at %#lx for %s. Transform: %p\n",
			    be.branch_site + ctx->ulexec.base_vaddr, symbol.name, tfptr);
			res = install_aarch64_call26_patch(ctx, linker, &be, &symbol, tfptr);
			if (res == false) {
				fprintf(stderr, "external linkage failure: "
				    "install_aarch64_call26_patch() failed\n");
				return false;
			}
#endif
		}
		/* Reset per-callsite transform match before the next iteration. */
		tfptr = NULL;
	}
	shiva_debug("Calling shiva_xref_iterator_init\n");
	shiva_xref_iterator_init(ctx, &xrefs);
	shiva_debug("iterating over xrefs\n");
	while (shiva_xref_iterator_next(&xrefs, &xe) == SHIVA_ITER_OK) {
		switch(xe.type) {
		case SHIVA_XREF_TYPE_UNKNOWN:
			fprintf(stderr, "External linkage failure: "
			    "Discovered unknown XREF insn-sequence at %#lx\n",
			    xe.adrp_site);
			return false;
		case SHIVA_XREF_TYPE_ADRP_LDR:
		case SHIVA_XREF_TYPE_ADRP_STR:
		case SHIVA_XREF_TYPE_ADRP_ADD:
			shiva_debug("Found %s XREF at %#lx for %s\n",
			    (xe.flags & SHIVA_XREF_F_INDIRECT) ? "indirect" : "", xe.adrp_site, xe.symbol.name);
			/* Indirect xrefs are matched by the dereferenced symbol's name. */
			if (elf_symbol_by_name(&linker->elfobj,
			    (xe.flags & SHIVA_XREF_F_INDIRECT) ? xe.deref_symbol.name : xe.symbol.name,
			    &symbol) == true) {
				shiva_debug("Found symbol for %s\n", xe.symbol.name);
				/* Only global data objects are eligible for xref patching. */
				if (symbol.type != STT_OBJECT ||
				    symbol.bind != STB_GLOBAL)
					continue;
				shiva_debug("Installing xref patch at %#lx for symbol %s\n",
				    xe.adrp_site, xe.symbol.name);
				res = install_aarch64_xref_patch(ctx, linker, &xe, &symbol);
				if (res == false) {
					fprintf(stderr, "install_aarch64_xref_patch() for '%s' failed\n",
					    symbol.name);
					return false;
				}
			}
			break;
		default:
			break;
		}
	}
	return true;
}
/*
* Module entry point. Lookup symbol "shakti_main"
*/
static bool
module_entrypoint(struct shiva_module *linker, uint64_t *entry)
{
struct elf_symbol symbol;
if (elf_symbol_by_name(&linker->elfobj, "shakti_main", &symbol) == false) {
shiva_debug("elf_symbol_by_name failed to find 'shakti_main'\n");
return false;
}
shiva_debug("Module text: %#lx\n", linker->text_vaddr);
*entry = linker->text_vaddr + symbol.value;
return true;
}
/*
 * Linear search of the module's GOT entry list for the entry named
 * 'name'. On a match the entry is copied into *out and true is
 * returned; false means no entry with that symbol name exists.
 */
static bool
got_entry_by_name(struct shiva_module *linker, char *name, struct shiva_module_got_entry *out)
{
	struct shiva_module_got_entry *entry;

	TAILQ_FOREACH(entry, &linker->tailq.got_list, _linkage) {
		if (strcmp(entry->symname, name) != 0)
			continue;
		memcpy(out, entry, sizeof(*out));
		return true;
	}
	return false;
}
/*
* Shiva Module's have a .got.plt section at the end
* of the data segment in memory.
*
* "Shiva Module layout"
* [text segment]: 0x8000000 (.text, .rodata)
* [data segment]: 0x9000000 (.data, .got, .bss)
*
* We patch the .got with the correct address to
* either a libc function (That is resolved to
* the musl-libc within the Shiva executable) or
* a function native to the Shiva module itself.
*/
/*
 * Populate every entry of the Shiva module's GOT (.got.plt at the end of the
 * module's data segment). Each entry is resolved, in order, from:
 *   1. the Shiva module itself (patch.o),
 *   2. the target executable (SHIVA_LINKING_MICROCODE_PATCH mode),
 *   3. the target's DT_NEEDED shared libraries (via delayed relocations
 *      applied later by shiva_post_linker()).
 * Returns true when every GOT entry is either resolved or queued as a
 * delayed relocation.
 */
static bool
resolve_pltgot_entries(struct shiva_module *linker)
{
	uint64_t gotaddr, so_base;	/* NOTE(review): so_base is unused */
	uint64_t *GOT;
	struct shiva_module_got_entry *current;
	char *so_path;
	bool res;

	/*
	 * Order of symbol resolution:
	 * 1. Resolve symbol from local Shiva module (i.e. patch.o)
	 * 2. Resolve symbol from target executable
	 * 3. Resolve symbol from targets shared library dependencies.
	 */
	gotaddr = linker->data_vaddr + linker->pltgot_off;	/* NOTE(review): written but never read; GOT is recomputed per entry */
	TAILQ_FOREACH(current, &linker->tailq.got_list, _linkage) {
		struct elf_symbol symbol;
		/*
		 * Setup the modules internal GOT table.
		 */
		GOT = (uint64_t *)((uint64_t)(linker->data_vaddr + linker->pltgot_off + current->gotoff));
		shiva_debug("Processing GOT[%s](%#lx)\n", current->symname, (uint64_t)GOT);
		/*
		 * First look for the functions symbol within the loaded Shiva module
		 */
		if (elf_symbol_by_name(&linker->elfobj, current->symname, &symbol) == true) {
			/*
			 * TODO: investigate why we are accepting STT_OBJECT here. This is our
			 * PLT/GOT for the Shiva module. Should only be function calls in this
			 * part of the GOT, I think... Could cause a bug.
			 */
			if (symbol.type == STT_FUNC || symbol.type == STT_OBJECT) {
				shiva_debug("Setting [%#lx] GOT entry '%s' to %#lx\n",
				    linker->data_vaddr + linker->pltgot_off +
				    current->gotoff, current->symname, symbol.value + linker->text_vaddr +
				    (module_has_transforms(linker) == true ? linker->tf_text_offset : 0));
				/* Module-local symbol: text base plus (possibly shifted) offset. */
				*GOT = symbol.value + linker->text_vaddr +
				    (module_has_transforms(linker) == true ? linker->tf_text_offset : 0);
				shiva_debug("*GOT = %#lx (Address within Shiva module)\n", *GOT);
				continue;
			}
		}
		/*
		 * If the PLTGOT entry doesn't point to a symbol within the Shiva module
		 * itself, then let's check to see if we find it in the target executable.
		 * Only applicable if linking mode is set: SHIVA_LINKING_MICROCODE_PATCH
		 */
		if (linker->mode == SHIVA_LINKING_MICROCODE_PATCH) {
			bool in_target = false;	/* NOTE(review): unused */
			struct elf_plt plt_entry;
			struct shiva_module_delayed_reloc *delay_rel;
			ENTRY e, *ep;

			e.key = current->symname;
			e.data = NULL;
			/*
			 * Handle the special case of SHIVA_HELPER_CALL_EXTERNAL()
			 * macro. Symbols are in the patch object, but give Shiva
			 * descriptive information about resolving an external symbol
			 * within the target. See SHIVA HELPER macros in documentation.
			 */
			shiva_debug("Searching cache for %s\n", current->symname);
			if (hsearch_r(e, FIND, &ep, &linker->cache.helpers) != 0) {
				char *real_symname;
				/*
				 * We are dealing with a helper symbol that denotes that
				 * we need to resolve the GOT entry with the value of the
				 * external version of a given symbol. See SHIVA_HELPER macros
				 * in documentation.
				 *
				 * NOTE(review): 'symbol' is only initialized if the
				 * elf_symbol_by_name() lookup at the top of this loop
				 * succeeded; on this path it may be read uninitialized.
				 * Also strstr() can return NULL before the += below —
				 * verify helper names always contain "_orig_func_".
				 */
				real_symname = strstr(symbol.name, "_orig_func_");
				real_symname += strlen("_orig_func_");
				shiva_debug("Looking up symbol: '%s' in target\n", real_symname);
				if (elf_symbol_by_name(linker->target_elfobj, real_symname,
				    &symbol) == true) {
					if (symbol.value == 0 || symbol.type != STT_FUNC) {
						fprintf(stderr, "external symbol is invalid: %s\n",
						    symbol.name);
						return false;
					}
					shiva_debug("Resolving helper function '%s' to external symbol '%s'"
					    " = %#lx\n", symbol.name, real_symname,
					    symbol.value + linker->target_base);
					*(uint64_t *)GOT = symbol.value + linker->target_base;
					continue;
				}
			}
			/*
			 * Next we handle all other cases
			 */
			shiva_debug("Looking up symbol '%s' in target %s\n", current->symname,
			    elf_pathname(linker->target_elfobj));
			if (elf_symbol_by_name(linker->target_elfobj, current->symname,
			    &symbol) == true) {
				/* Zero-valued STT_FUNC means the target imports it via its PLT. */
				if (symbol.value == 0 && symbol.type == STT_FUNC) {
					if (elf_plt_by_name(linker->target_elfobj,
					    symbol.name, &plt_entry) == true) {
						struct elf_symbol tmp;
						char path_out[PATH_MAX];

						shiva_debug("Symbol '%s' is a PLT entry, let's look it up in the shared libraries\n",
						    symbol.name);
						res = shiva_so_resolve_symbol(linker, (char *)symbol.name, &tmp, &so_path);
						if (res == false) {
							fprintf(stderr, "Failed to resolve symbol '%s' in shared libs\n",
							    symbol.name);
							return false;
						}
						if (realpath(so_path, path_out) == NULL) {
							perror("realpath");
							return false;
						}
						delay_rel = shiva_malloc(sizeof(*delay_rel));
						delay_rel->rel_unit = (uint8_t *)GOT;
						delay_rel->rel_addr = (uint64_t)GOT;
						delay_rel->symval = tmp.value;
						delay_rel->symname = shiva_strdup(symbol.name);
						strncpy(delay_rel->so_path, path_out, PATH_MAX);
						delay_rel->so_path[PATH_MAX - 1] = '\0';
						shiva_debug("Delayed relocation for GOT[%s] -> lookup %s\n",
						    symbol.name, delay_rel->so_path);
						/*
						 * We don't fill out the value of the GOT. The shared library
						 * whom the symbol lives in hasn't even been loaded by the
						 * ld-linux.so yet. Once ld-linux.so is finished it will pass
						 * control to shiva_post_linker() function once the base address
						 * can be known of the library. We must insert a delayed relocation
						 * entry.
						 */
						if (enable_post_linker(linker) == false) {
							fprintf(stderr, "failed to enable delayed relocs\n");
							return false;
						}
						TAILQ_INSERT_TAIL(&linker->tailq.delayed_reloc_list, delay_rel, _linkage);
					} else {
						fprintf(stderr, "Undefined linking behavior: No PLT entry for STT_FUNC '%s' with zero value\n",
						    symbol.name);
						return false;
					}
				} else if (symbol.value > 0 && symbol.type == STT_FUNC) {
					/* Defined in the target itself: rebase by its load address. */
					shiva_debug("resolved symbol in target: %s\n", elf_pathname(linker->target_elfobj));
					*(uint64_t *)GOT = symbol.value + linker->target_base;
				}
			} else {
				/*
				 * The symbol isn't in the target ELF executable, or in the Shiva
				 * module. Let's try resolving it from the shared library dependencies
				 * listed in the targets dynamic segment.
				 *
				 * NOTE(review): symbol.name is used below, but 'symbol' was
				 * not filled in on this path (both lookups failed) — this
				 * looks like it should be current->symname; confirm.
				 */
				struct elf_symbol tmp;
				char path_out[PATH_MAX];

				res = shiva_so_resolve_symbol(linker, (char *)symbol.name, &tmp, &so_path);
				if (res == false) {
					fprintf(stderr, "Failed to resolve symbol '%s' in shared libs\n",
					    symbol.name);
					return false;
				}
				if (realpath(so_path, path_out) == NULL) {
					perror("realpath");
					return false;
				}
				delay_rel = shiva_malloc(sizeof(*delay_rel));
				delay_rel->rel_unit = (uint8_t *)GOT;
				delay_rel->rel_addr = (uint64_t)GOT;
				delay_rel->symval = tmp.value;
				delay_rel->symname = shiva_strdup(symbol.name);
				strncpy(delay_rel->so_path, path_out, PATH_MAX);
				delay_rel->so_path[PATH_MAX - 1] = '\0';
				shiva_debug("Delayed relocation for GOT[%s] -> lookup %s\n",
				    symbol.name, delay_rel->so_path);
				/*
				 * We don't fill out the value of the GOT. The shared library
				 * whom the symbol lives in hasn't even been loaded by the
				 * ld-linux.so yet. Once ld-linux.so is finished it will pass
				 * control to shiva_post_linker() function once the base address
				 * can be known of the library. We must insert a delayed relocation
				 * entry.
				 */
				if (enable_post_linker(linker) == false) {
					fprintf(stderr, "failed to enable delayed relocs\n");
					return false;
				}
				TAILQ_INSERT_TAIL(&linker->tailq.delayed_reloc_list, delay_rel, _linkage);
				continue;
			}
		/*
		 * This next condition only exists on x86_64 currently anyway.
		 * We may remove linker->mode from the AMP version of Shiva
		 * and start eradicating old linking styles that are still
		 * important, but not so much to AMP. Although I think it could
		 * be?
		 */
		} else if (linker->mode == SHIVA_LINKING_MODULE) {
			/* Module-linking mode: resolve against the Shiva binary itself. */
			if (elf_symbol_by_name(&linker->self, current->symname, &symbol) == false) {
				fprintf(stderr, "Could not resolve symbol '%s'. Linkage failure!\n",
				    current->symname);
				return false;
			}
			*(uint64_t *)GOT = symbol.value;
			shiva_debug("Found symbol '%s':%#lx within the Shiva API\n", current->symname,
			    symbol.value);
		} else {
			fprintf(stderr, " Undefined linking behavior\n");
			shiva_debug("undefined linking behavior\n");
			return false;
		}
	}
	return true;
}
static bool
patch_plt_stubs(struct shiva_module *linker)
{
size_t i = 0;
struct shiva_module_plt_entry *current;
uint8_t *stub;
uint64_t gotaddr, pltaddr, gotoff;
TAILQ_FOREACH(current, &linker->tailq.plt_list, _linkage) {
struct shiva_module_got_entry got_entry;
if (got_entry_by_name(linker, current->symname, &got_entry) == false) {
fprintf(stderr, "Unable to find GOT entry for '%s'\n", current->symname);
return false;
}
stub = &linker->text_mem[linker->plt_off + i * sizeof(plt_stub)];
gotaddr = linker->data_vaddr + linker->pltgot_off + got_entry.gotoff;
pltaddr = linker->text_vaddr + (linker->plt_off + i * sizeof(plt_stub));
gotoff = gotaddr - pltaddr - sizeof(plt_stub);
#ifdef __x86_64__
*(uint32_t *)&stub[2] = gotoff;
#elif __aarch64__
shiva_debug("got_addr: %#lx\n", gotaddr);
uint32_t rval = ((gotaddr - pltaddr) >> 2);
uint32_t insn_bytes = *(uint32_t *)&stub[0];
insn_bytes = (insn_bytes & ~(RELOC_MASK(19) << 5)) | ((rval & RELOC_MASK(19)) << 5);
*(uint32_t *)&stub[0] = insn_bytes;
#endif
i++;
shiva_debug("SYMNAME: %s PLTADDR: %#lx GOTADDR: %#lx GOTOFF: %#lx\n", current->symname, pltaddr, gotaddr, gotoff);
shiva_debug("Fixedup PLT stub with GOT offset: %#lx\n", gotoff);
}
return true;
}
/*
 * Find the runtime mapping record for the module section named
 * 'shdrname'. Copies the matching record into *smap and returns true;
 * returns false if the section has no mapping.
 */
static bool
get_section_mapping(struct shiva_module *linker, char *shdrname, struct shiva_module_section_mapping *smap)
{
	struct shiva_module_section_mapping *entry;

	TAILQ_FOREACH(entry, &linker->tailq.section_maplist, _linkage) {
		if (strcmp(shdrname, entry->name) == 0) {
			memcpy(smap, entry, sizeof(*smap));
			return true;
		}
	}
	return false;
}
/*
* An STT_NOTYPE symbol was found within the Shiva module;
* This must be an external reference to a symbol. Search order:
* 1. Check target executable for symbol.
* 2. Check target executable's dependencies (DT_NEEDED) for symbol.
*/
#define RESOLVER_TARGET_SHIVA_SELF 0
#define RESOLVER_TARGET_EXECUTABLE 1
#define RESOLVER_TARGET_SO_RESOLVE 2
/*
 * Resolve an external (STT_NOTYPE in the module) symbol reference.
 * Search order depends on linking mode:
 *   SHIVA_LINKING_MODULE          -> the Shiva interpreter binary itself
 *   SHIVA_LINKING_MICROCODE_PATCH -> target executable, then its shared
 *                                    library deps, then the Shiva binary.
 *
 * On success: *symbol is filled in, *e_type is the ELF type of the object
 * the symbol was found in, *type is one of the RESOLVER_TARGET_* constants,
 * and (for RESOLVER_TARGET_SO_RESOLVE) path_out receives the resolved
 * shared-object path — assumed to be at least PATH_MAX bytes.
 */
static bool
internal_symresolve(struct shiva_module *linker, char *symname,
    struct elf_symbol *symbol, uint64_t *e_type, uint64_t *type, char *path_out)
{
	struct elf_symbol tmp;
	/* Primary lookup object: the Shiva binary itself or the target ELF. */
	struct elfobj *elfobj = linker->mode == SHIVA_LINKING_MODULE ?
	    &linker->self : linker->target_elfobj;
	bool res;

	shiva_debug("Looking up symbol %s in %s\n", symname, linker->mode ==
	    SHIVA_LINKING_MODULE ? "the Shiva Interpreter" : "target ELF executable");
	res = elf_symbol_by_name(elfobj, symname, &tmp);
	*e_type = elf_type(elfobj);
	if (res == true) {
		switch(tmp.type) {
		case STT_NOTYPE:
			/*
			 * XXX this NOTYPE case is somewhat undefined. We're looking for a symbol
			 * that was STT_NOTYPE in the Shiva patch, and so we search externally for
			 * it, and it is again STT_NOTYPE. I think we might only hit this case
			 * by random. Temporarily commenting this code out, I'm pretty sure this
			 * is an invalid ELF linking path to take.
			 */
			shiva_debug("Found symbol '%s' in target, but it's NOTYPE\n", symname);
			shiva_debug("Undefined linking behavior\n");
			return false;
			/* Disabled fallback kept for reference; unreachable after the return above. */
#if 0
			switch (linker->mode) {
			case SHIVA_LINKING_MICROCODE_PATCH:
				*e_type = elf_type(&linker->self);
				*type = RESOLVER_TARGET_SHIVA_SELF;
				res = elf_symbol_by_name(&linker->self, symname, &tmp);
				if (res == true) {
					memcpy(symbol, &tmp, sizeof(*symbol));
					return true;
				}
				break;
			case SHIVA_LINKING_MODULE:
			default:
				shiva_debug("Found no symbol '%s' in shiva binary\n", symname);
				return false;
			}
#endif
		case STT_FUNC:
		case STT_OBJECT:
			shiva_debug("Found symbol '%s' in %s\n", symname, linker->mode == SHIVA_LINKING_MODULE ?
			    "shiva binary" : "target binary");
			if (linker->mode == SHIVA_LINKING_MODULE) {
				*type = RESOLVER_TARGET_SHIVA_SELF;
			} else {
				*type = RESOLVER_TARGET_EXECUTABLE;
			}
			memcpy(symbol, &tmp, sizeof(*symbol));
			return true;
		default:
			/* Any other symbol type is not resolvable here. */
			return false;
		}
	} else if (res == false && linker->mode == SHIVA_LINKING_MICROCODE_PATCH) {
		char *so_path;

		/* Not in the target: try its DT_NEEDED shared libraries. */
		res = shiva_so_resolve_symbol(linker, (char *)symname, &tmp, &so_path);
		if (res == true) {
			*type = RESOLVER_TARGET_SO_RESOLVE;
			*e_type = ET_DYN;
			if (realpath(so_path, path_out) == NULL) {
				perror("realpath");
				return false;
			}
			shiva_debug("Found symbol '%s:%#lx' within shared library '%s'\n", symname,
			    tmp.value, path_out);
			memcpy(symbol, &tmp, sizeof(*symbol));
			return true;
		}
		/* Last resort: the Shiva interpreter binary itself. */
		res = elf_symbol_by_name(&linker->self, symname, &tmp);
		if (res == true) {
			*type = RESOLVER_TARGET_SHIVA_SELF;
			*e_type = elf_type(&linker->self);
			shiva_debug("Found symbol '%s' within the Shiva binary: %#lx\n", symname, tmp.value);
			memcpy(symbol, &tmp, sizeof(*symbol));
			return true;
		} else {
			shiva_debug("Failed to find symbol '%s'\n", symname);
			return false;
		}
	}
	/* Lookup failed in SHIVA_LINKING_MODULE mode: no further fallbacks. */
	return false;
}
static bool
enable_post_linker(struct shiva_module *linker)
{
	shiva_ctx_t *ctx = linker->ctx;
	shiva_auxv_iterator_t a_iter;
	struct shiva_auxv_entry a_entry;

	/*
	 * Arrange for the delayed-relocation pass: rewrite the AT_ENTRY
	 * auxv entry of the userland-exec'd target so control reaches
	 * shiva_post_linker before the program's real entry point.
	 *
	 * Returns true on success (or if already enabled), false if the
	 * auxv iterator cannot be initialized or the entry cannot be set.
	 */

	/* Already enabled — the flag makes this function idempotent. */
	if (linker->flags & SHIVA_MODULE_F_DELAYED_RELOCS)
		return true;
	shiva_debug("Enabling post linker for delayed relocations\n");
	linker->flags |= SHIVA_MODULE_F_DELAYED_RELOCS;
	if (shiva_auxv_iterator_init(ctx, &a_iter,
	    ctx->ulexec.auxv.vector) == false) {
		fprintf(stderr, "shiva_auxv_iterator_init failed\n");
		return false;
	}
	while (shiva_auxv_iterator_next(&a_iter, &a_entry) == SHIVA_ITER_OK) {
		if (a_entry.type == AT_ENTRY) {
			uint64_t entry;
			/*
			 * IMPORTANT NOTE:
			 * In our aarch64 implementation, shiva is an ET_EXEC
			 * so we can pass a function address as absolute. In
			 * other implementations we would have to create a macro
			 * to entry = GET_RIP() - &shiva_post_linker
			 * -- In aarch64 Shiva we can just pass &shiva_post_linker address
			 * directly.
			 */
			entry = (uint64_t)&shiva_post_linker;
			/*
			 * Cast to uint64_t so the argument matches the %#lx
			 * conversion; passing the raw function pointer is a
			 * format/argument type mismatch (undefined behavior).
			 */
			shiva_debug("Enabling post linker, setting AT_ENTRY to %#lx\n",
			    entry);
			if (shiva_auxv_set_value(&a_iter, entry) == false) {
				fprintf(stderr, "shiva_auxv_set_value failed (Setting %#lx)\n", entry);
				return false;
			}
			break;
		}
	}
	return true;
}
bool
is_text_encoding_reloc(struct shiva_module *linker, uint64_t r_offset)
{
	struct elf_section shdr;
	elf_symtab_iterator_t sym_iter;
	struct elf_symbol symbol;
	bool have_text;

	/*
	 * Return true when r_offset does NOT fall within any STT_FUNC
	 * symbol of the module's object — i.e. the relocation applies to
	 * data encoded into .text rather than to instruction bytes.
	 */
	shiva_debug("r_offset: %#lx\n", r_offset);
	/*
	 * Perform the section lookup unconditionally and assert on the
	 * result. The original wrapped the call itself in assert(), which
	 * compiles the call away entirely under NDEBUG (CERT EXP31-C:
	 * no side effects in assertions).
	 */
	have_text = elf_section_by_name(&linker->elfobj, ".text", &shdr);
	assert(have_text == true);
	(void)have_text; /* silence unused-variable warning under NDEBUG */
	shiva_debug("r_offset: %#lx shdr.offset: %#lx shdr.size: %#lx\n", r_offset, shdr.offset,
	    shdr.size);
	elf_symtab_iterator_init(&linker->elfobj, &sym_iter);
	while (elf_symtab_iterator_next(&sym_iter, &symbol) == ELF_ITER_OK) {
		if (symbol.type != STT_FUNC)
			continue;
		/* Offset lands inside a function body: not encoded data. */
		if (r_offset >= symbol.value && r_offset < symbol.value + symbol.size)
			return false;
	}
	return true;
}
bool
apply_relocation(struct shiva_module *linker, struct elf_relocation rel,
struct shiva_transform *transform)
{
struct shiva_module_plt_entry *current = NULL;
struct shiva_module_section_mapping *smap_current;
struct shiva_module_section_mapping smap, smap_tmp;
uint8_t *rel_unit;
uint64_t symval;
uint64_t rel_addr;
uint64_t rel_val;
uint32_t insn_bytes;
struct elf_symbol symbol;
ENTRY e, *ep;
struct shiva_module_got_entry got_entry;
bool res;
char *symbol_section;
struct elf_section tmp_shdr;
char *shdrname = strrchr(rel.shdrname, '.');
if (shdrname == NULL) {
shiva_debug("strrchr failed\n");
return false;
}
if (get_section_mapping(linker, shdrname, &smap) == false) {
shiva_debug("Failed to retrieve section data for %s\n", rel.shdrname);
return false;
}
shiva_debug("Successfully retrieved section mapping for %s\n", shdrname);
shiva_debug("linker->text_vaddr: %#lx\n", linker->text_vaddr);
shiva_debug("linker->data_vaddr: %#lx\n", linker->data_vaddr);
shiva_debug("smap.offset: %#lx\n", smap.offset);
#if defined (__aarch64__)
if (module_has_transforms(linker) == true &&
strcmp(shdrname, ".text") == 0) {
if (transform != NULL) {
bool text_on_text_reloc = false;
bool text_encoding = false;
/*
* We are relocating code that has been spliced into a target
* function, via transformation.
* Relocation offset equals offset of function we are transforming (segment_offset)
* plus the splice offset (transform->offset) plus the original relocation offset.
*/
shiva_debug("Transform splice relocation: segment_offset %#lx transform_offset %#lx\n",
transform->segment_offset, transform->offset);
shiva_debug("Rel type: %d\n", rel.type);
if (rel.type == R_AARCH64_ADR_PREL_PG_HI21 ||
rel.type == R_AARCH64_ADD_ABS_LO12_NC) {
shiva_debug("Testing rel.symname: %s with .text\n",
rel.symname);
if (strcmp(rel.symname, ".text") == 0) {
shiva_debug("Found text on text relocation\n");
shiva_debug("Increasing r_addend by %zu bytes\n",
transform->splice.copy_len3 +
transform->segment_offset + transform->offset);
rel.addend += transform->segment_offset + transform->offset;
rel.addend += transform->splice.copy_len3;
}
}
/*
* XXX In the future maybe just check to see if this is
* an R_AARCH64_ABS64 relocation.
*/
if (is_text_encoding_reloc(linker, rel.offset) == true) {
shiva_debug("Text encoding is true! Increasing r_offset by %zu\n",
transform->splice.copy_len3);
/*
* See transformation specification on handling
* relocations that apply to .text encoded data.
*/
shiva_debug("Increasing r_offset(%#lx) to %#lx\n", rel.offset,
rel.offset + transform->splice.copy_len3);
rel.offset += transform->splice.copy_len3;
text_encoding = true;
}
/*
* In the event of relocating a spliced function we must always increase
* the rel.offset to match the new location.
*/
rel.offset = transform->segment_offset + transform->offset + rel.offset;
} else {
shiva_debug("Transforms exist. rel_offset = rel.offset(%#lx)"
" + linker->tf_text_offset(%#lx) = %#lx\n", rel.offset,
linker->tf_text_offset, rel.offset + linker->tf_text_offset);
/*
* We are relocating code that exists after all splices in our modules