optimize shellcode and recover main address at runtime

nganhkhoa 2023-07-10 14:15:05 +07:00
parent ed2f09348e
commit eccd0bf845
3 changed files with 85 additions and 83 deletions


@@ -408,14 +408,12 @@ func (mc *MachoContext) ReworkForObjc() {
     // __bss section is dynamically allocated at the end to or something, hmmge
     // assume that it order correctly, which it should if compiled and not modified
+    // each section has their addr field which we can use that with segment virtual address
+    // to calculate the offset of the last section from segment starts
+    // then use the size of section to calculate the end of segment in file
     sections := segment.Sections()
     last := sections[len(sections)-1]
-    data_end = int(last.Offset()) + int(last.Size())
-    if last.Offset() == 0 {
-        before_last := sections[len(sections)-2]
-        data_end += int(before_last.Offset()) + int(before_last.Size())
-    }
+    data_end = int(last.Addr() - segment.Vmaddr() + segment.Fileoff() + last.Size())
 }
 ptr += int64(cmd.Cmdsize())
 }
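
The new expression walks from the last section's virtual address back to the segment base and rebases it onto the segment's file offset, so it also covers a trailing zero-fill section such as __bss (whose Offset() is 0, which is what the removed special case tried to work around). A minimal Go sketch of that calculation, with the Section/Segment accessor shapes assumed from the names used in the diff:

    // Minimal accessor shapes assumed from the diff; the real types live in
    // the repo's Mach-O package.
    type Section interface {
        Addr() uint64
        Size() uint64
    }

    type Segment interface {
        Vmaddr() uint64
        Fileoff() uint64
        Sections() []Section
    }

    // dataEnd returns the file offset just past the last section of a segment:
    // the section's address relative to the segment, rebased onto the segment's
    // file offset, plus the section size. No special case is needed when the
    // last section is zero-fill, since Addr() and Size() are still populated.
    func dataEnd(seg Segment) int {
        sections := seg.Sections()
        last := sections[len(sections)-1]
        return int(last.Addr() - seg.Vmaddr() + seg.Fileoff() + last.Size())
    }
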
@@ -431,62 +429,54 @@ func (mc *MachoContext) ReworkForObjc() {
     // we need to store the return address, and parameters passed to main
     // we also store our header address to not calculate many times
-    /*
-        adr x8, 0
-        sub sp, sp, #0x30
-        str x30, [sp]
-        movz x9, #0x3d68 ; offset at this point
-        sub x8, x8, x9
-        str x8, [sp, #0x8]
-        str x0, [sp, #0x10]
-        str x1, [sp, #0x18]
-        str x2, [sp, #0x20]
-        str x3, [sp, #0x28]
-        movz x9, #0x81d8 ; offset to end of __DATA
-        add x9, x8, x9
-        ldr x9, [x9]
-        blr x9
-        ldr x8, [sp, #0x8]
-        ldr x0, [sp, #0x10]
-        ldr x1, [sp, #0x18]
-        ldr x2, [sp, #0x20]
-        ldr x3, [sp, #0x28]
-        movz x9, #0x3e3c ; offset to original main
-        add x9, x8, x9
-        blr x9
-        ldr x30, [sp]
-        add sp, sp, #0x10
-        ret
+    // must expand stack to store arguments passed
+    // must use newly allocated stack region
+    // must save return address
+    // must recover stack to same value before calling main
+    // must recover link register before calling main
+    // we use shorthand store/load multiple
+    // arm also has different indexing instruction, so be careful
+    // https://developer.arm.com/documentation/102374/0101/Loads-and-stores---addressing
+    /*
+        adr x8, 0
+        # x9 = (offset end of __DATA) - (offset shellcode)
+        movz x9, #0x9999
+        add x8, x8, x9
+
+        stp x30, x8, [sp], #-0x10
+        stp x3, x2, [sp], #-0x10
+        stp x1, x0, [sp], #-0x10
+
+        # custom intializer
+        ldr x9, [x8]
+        blr x9
+
+        ldp x1, x0, [sp, #0x10]!
+        ldp x3, x2, [sp, #0x10]!
+        ldp x30, x8, [sp, #0x10]!
+
+        # original main
+        # link register is set so jump only
+        ldr x9, [x8, #8]
+        br x9
     */
     // TODO: fix to work with offset larger than 0xffff
     shellcode := []uint32{
         0x10000008,
-        0xD100C3FF,
-        0xF90003FE,
-        0, // movz_shellcode_offset,
-        0xCB090108,
-        0xF90007E8,
-        0xF9000BE0,
-        0xF9000FE1,
-        0xF90013E2,
-        0xF90017E3,
-        0, // movz_data_end_offset,
-        0x8B090109,
-        0xF9400129,
+        0, // x9 = (offset end of __DATA) - (offset shellcode)
+        0x8B090108,
+        0xA8BF23FE,
+        0xA8BF0BE3,
+        0xA8BF03E1,
+        0xF9400109,
         0xD63F0120,
-        0xF94007E8,
-        0xF9400BE0,
-        0xF9400FE1,
-        0xF94013E2,
-        0xF94017E3,
-        0, // movz_main_offset,
-        0x8B090109,
-        0xD63F0120,
-        0xF94003FE,
-        0x910043FF,
-        0xD65F03C0,
+        0xA9C103E1,
+        0xA9C10BE3,
+        0xA9C123FE,
+        0xF9400509,
+        0xD61F0120,
     }

     ins_size_byte := 4
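
For reference, the new word list lines up one-to-one with the assembly in the comment block above. The pairing below is an annotation added here (not part of the source); the zero word is the MOVZ placeholder that gets patched with data_end - shellcode_offset in the following hunk, and the two loads through x8 read the slots that the runtime fixer fills at the end of __DATA:

    // Annotated mirror of the new shellcode (mnemonics taken from the comment
    // block in the diff; the inline notes are added here).
    var annotatedShellcode = []uint32{
        0x10000008, // adr  x8, 0                 ; x8 = runtime address of this shellcode
        0x00000000, // movz x9, #imm16            ; patched: data_end - shellcode_offset
        0x8B090108, // add  x8, x8, x9            ; x8 = end of __DATA (the main-peg)
        0xA8BF23FE, // stp  x30, x8, [sp], #-0x10 ; push link register and peg pointer
        0xA8BF0BE3, // stp  x3, x2, [sp], #-0x10  ; push main's arguments (x0-x3)
        0xA8BF03E1, // stp  x1, x0, [sp], #-0x10
        0xF9400109, // ldr  x9, [x8]              ; peg slot 0: custom initializer
        0xD63F0120, // blr  x9
        0xA9C103E1, // ldp  x1, x0, [sp, #0x10]!  ; restore arguments
        0xA9C10BE3, // ldp  x3, x2, [sp, #0x10]!
        0xA9C123FE, // ldp  x30, x8, [sp, #0x10]! ; restore link register and x8
        0xF9400509, // ldr  x9, [x8, #8]          ; peg slot 1: original main
        0xD61F0120, // br   x9                    ; tail-jump, lr already restored
    }
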
@@ -497,20 +487,22 @@ func (mc *MachoContext) ReworkForObjc() {
     return uint32(uint32(v)<<5 | uint32(0x694)<<21 | uint32(0x09))
 }

-    movz_shellcode_offset := encode_movz(shellcode_offset)
-    movz_main_offset := encode_movz(main_offset)
-    movz_data_end_offset := encode_movz(data_end)
-    shellcode[3] = movz_shellcode_offset
-    shellcode[10] = movz_data_end_offset
-    shellcode[19] = movz_main_offset
+    // movz_shellcode_offset := encode_movz(shellcode_offset)
+    // movz_main_offset := encode_movz(main_offset)
+    // movz_data_end_offset := encode_movz(data_end)
+    movz_calculate_offset := encode_movz(data_end - shellcode_offset)
+    shellcode[1] = movz_calculate_offset
+    // shellcode[10] = movz_data_end_offset
+    // shellcode[19] = movz_main_offset

     fmt.Printf("// shellcode_offset=%x\n", shellcode_offset)
     fmt.Printf("// main_offset=%x\n", main_offset)
     fmt.Printf("// data_end=%x\n", data_end)
-    fmt.Printf("// movz_shellcode_offset=%x\n", movz_shellcode_offset)
-    fmt.Printf("// movz_main_offset=%x\n", movz_main_offset)
-    fmt.Printf("// movz_data_end_offset=%x\n", movz_data_end_offset)
+    fmt.Printf("// movz_calculate_offset=%x\n", movz_calculate_offset)
+    // fmt.Printf("// movz_shellcode_offset=%x\n", movz_shellcode_offset)
+    // fmt.Printf("// movz_main_offset=%x\n", movz_main_offset)
+    // fmt.Printf("// movz_data_end_offset=%x\n", movz_data_end_offset)
     fmt.Printf("// lc_main_offset=%x\n", lc_main_offset)

     offset := int64(shellcode_offset)
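
encode_movz packs a 16-bit value into an AArch64 "movz x9, #imm16": 0x694<<21 | 0x09 is the fixed part of the encoding (0xD2800009, MOVZ targeting x9) and the immediate sits in bits 5..20. That is also why the TODO above exists: anything over 0xffff does not fit in a single MOVZ. A hedged sketch of the same encoder with an explicit range check (the package name and error return are additions here, not in the source):

    package machofile // hypothetical package name for this sketch

    import "fmt"

    // encodeMovzX9 builds "movz x9, #imm16". 0x694<<21 | 0x09 == 0xD2800009,
    // i.e. MOVZ with destination x9 and a zero immediate; v fills bits 5..20.
    // In the diff it is called with data_end - shellcode_offset.
    func encodeMovzX9(v int) (uint32, error) {
        if v < 0 || v > 0xffff {
            // larger offsets would need a movz+movk pair or a literal load
            return 0, fmt.Errorf("offset %#x does not fit in a single movz", v)
        }
        return uint32(v)<<5 | uint32(0x694)<<21 | uint32(0x09), nil
    }
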


@@ -64,6 +64,10 @@ func (mc *MachoContext) Segments() []Segment {
     return mc.segments
 }

+func (mc *MachoContext) Main() uint64 {
+    return mc.entryoff
+}
+
 func (mc *MachoContext) WriteEnabled() bool {
     return mc.file != nil
 }


@@ -1230,24 +1230,29 @@ void fix_initializer(struct libcache_item* libfixing, struct libcache& cache) {
   // (note: __TEXT segment is aligned to the end of the page, free space in the middle)
   //
   // Below is the shellcode.
-  //
-  // sub sp, sp, #0x10
-  // str x30, [sp]
-  // adr x8, 0
-  // movz x9, #0x3d68 ; offset at this point
-  // sub x8, x8, x9
-  // str x8, [sp, #8]
-  // movz x9, #0x81d8
-  // add x9, x8, x9
-  // ldr x9, [x9]
-  // blr x9
-  // ldr x8, [sp, #8]
-  // movz x9, #0x3e3c ; offset to original main
-  // add x9, x8, x9
-  // blr x9
-  // ldr x30, [sp]
-  // add sp, sp, #0x10
-  // ret
+  /*
+  adr x8, 0
+  # x9 = (offset end of __DATA) - (offset shellcode)
+  movz x9, #0x9999
+  add x8, x8, x9
+
+  stp x30, x8, [sp], #-0x10
+  stp x3, x2, [sp], #-0x10
+  stp x1, x0, [sp], #-0x10
+
+  # custom intializer
+  ldr x9, [x8]
+  blr x9
+
+  ldp x1, x0, [sp, #0x10]!
+  ldp x3, x2, [sp, #0x10]!
+  ldp x30, x8, [sp, #0x10]!
+
+  # original main
+  # link register is set so jump only
+  ldr x9, [x8, #8]
+  br x9
+  */

   void* header = libfixing->header;
   const uint32_t magic = *(uint32_t *)header;
@@ -1309,7 +1314,8 @@ void fix_initializer(struct libcache_item* libfixing, struct libcache& cache) {
       uint64_t size = *((uint64_t*)sections_ptr + 5);
       uint64_t* dummy = (uint64_t*)(addr + slide + size);
-      *dummy = (uint64_t)custom_initializer;
+      dummy[0] = (uint64_t)custom_initializer;
+      dummy[1] = (uint64_t)(header) + bshield_data::main;
       printf("add custom main-peg at %p\n", dummy);
     } else if (custom_strcmp(name, "__LINKEDIT") == 0) {
       linkedit_vmaddr = vmaddr;
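
Taken together, the loader now writes a 16-byte "main-peg" right after the last __DATA section: slot 0 holds the custom initializer and slot 1 holds the image base plus the original main offset, which is exactly what the shellcode reads through x8 with "ldr x9, [x8]" and "ldr x9, [x8, #8]". A small illustrative Go sketch of that layout (constant names are invented here, not from the source):

    // Byte offsets within the main-peg written by fix_initializer at runtime
    // and dereferenced by the injected shellcode via x8.
    const (
        pegInitializerSlot = 0  // [x8]     -> custom_initializer
        pegMainSlot        = 8  // [x8, #8] -> header + original main offset
        pegSize            = 16 // total space reserved past the last __DATA section
    )
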