nganhkhoa 2025-01-13 12:54:05 -06:00
parent 1c495989d4
commit 06bbde2612
7 changed files with 414 additions and 418 deletions

View File

@ -18,4 +18,3 @@ func (action *removeStrings) withFat(ff *FatFile) error {
func NewRemoveStringsAction() *removeStrings {
return &removeStrings{}
}

View File

@ -117,7 +117,7 @@ func Cli() {
pc.remove_inits = true
pc.remove_codesign = true
pc.remove_others = true
pc.remove_string = true
}
pc.remove_imports = arg.RemoveBindSymbols
pc.remove_codesign = arg.RemoveCodeSign
@ -125,7 +125,7 @@ func Cli() {
pc.remove_others = arg.RemoveOthers
pc.remove_exports = arg.RemoveExports
pc.remove_symbol_table = arg.RemoveSymbolTable
pc.remove_string = arg.RemoveStrings
pc.dylib_to_add = arg.Dylibs
pc.rpath_to_add = arg.Rpath
pc.outfile = arg.Out
@ -266,54 +266,54 @@ func bcell2header(bfile string, header string) {
}
fmt.Fprintf(w, "};\n")
if info.Symbols != nil {
fmt.Fprintf(w, "__attribute__((section(\"__DATA,bshield\")))\n")
fmt.Fprintf(w, "char libs[] =\n")
for _, lib := range info.Symbols.Libs {
fmt.Fprintf(w, " \"%s\\0\"\n", lib)
}
fmt.Fprintf(w, ";\n")
fmt.Fprintf(w, "__attribute__((section(\"__DATA,bshield\")))\n")
fmt.Fprintf(w, "char symbols[] =\n")
for _, symbol := range info.Symbols.Symbols {
fmt.Fprintf(w, " \"%s\\0\"\n", symbol)
}
fmt.Fprintf(w, ";\n")
fmt.Fprintf(w, "__attribute__((section(\"__DATA,bshield\")))\n")
fmt.Fprintf(w, "char symbols[] =\n")
for _, symbol := range info.Symbols.Symbols {
fmt.Fprintf(w, " \"%s\\0\"\n", symbol)
}
fmt.Fprintf(w, ";\n")
fmt.Fprintf(w, "// very compact symbol table,\n")
fmt.Fprintf(w, "// [lib idx/*4 bytes*/, nsymbol/*4 byte*/]\n")
fmt.Fprintf(w, "// repeat nsymbol times [name offset/*3 bytes*/, segment idx/**/, offset /*4 btyes*/]\n")
fmt.Fprintf(w, "// name offset is 3 bytes because we don't think we should have a table size > 2^(3 * 8)\n")
fmt.Fprintf(w, "// very compact symbol table,\n")
fmt.Fprintf(w, "// [lib idx/*4 bytes*/, nsymbol/*4 byte*/]\n")
fmt.Fprintf(w, "// repeat nsymbol times [name offset/*3 bytes*/, segment idx/**/, offset /*4 btyes*/]\n")
fmt.Fprintf(w, "// name offset is 3 bytes because we don't think we should have a table size > 2^(3 * 8)\n")
fmt.Fprintf(w, "__attribute__((section(\"__DATA,bshield\")))\n")
fmt.Fprintf(w, "uint32_t encoded_table[] = {\n")
n_instructions := 0
for i, table := range info.Symbols.Tables {
fmt.Fprintf(w, " // %s\n", info.Symbols.Libs[i])
fmt.Fprintf(w, " %d/*lib offset*/,\n", table.LibIndex)
fmt.Fprintf(w, " %d/*nsymbols*/,\n", table.Nsymbols)
n_instructions += 2
for _, symbol := range table.Symbols {
fmt.Fprintf(w, " %d, 0x%x,\n", (symbol.SymbolIndex<<8)|symbol.SegmentIndex, symbol.Offset)
n_instructions += 2
}
fmt.Fprintf(w, "\n")
}
fmt.Fprintf(w, "};\n")
fmt.Fprintf(w, "uint32_t n_instructions = %d;\n", n_instructions)
} else {
fmt.Fprintf(w, "__attribute__((section(\"__DATA,bshield\")))\n")
fmt.Fprintf(w, "char libs[] = {};\n")
fmt.Fprintf(w, "__attribute__((section(\"__DATA,bshield\")))\n")
fmt.Fprintf(w, "uint32_t encoded_table[] = {\n")
n_instructions := 0
for i, table := range info.Symbols.Tables {
fmt.Fprintf(w, " // %s\n", info.Symbols.Libs[i])
fmt.Fprintf(w, " %d/*lib offset*/,\n", table.LibIndex)
fmt.Fprintf(w, " %d/*nsymbols*/,\n", table.Nsymbols)
n_instructions += 2
for _, symbol := range table.Symbols {
fmt.Fprintf(w, " %d, 0x%x,\n", (symbol.SymbolIndex<<8)|symbol.SegmentIndex, symbol.Offset)
n_instructions += 2
}
fmt.Fprintf(w, "\n")
}
fmt.Fprintf(w, "};\n")
fmt.Fprintf(w, "uint32_t n_instructions = %d;\n", n_instructions)
} else {
fmt.Fprintf(w, "__attribute__((section(\"__DATA,bshield\")))\n")
fmt.Fprintf(w, "char libs[] = {};\n")
fmt.Fprintf(w, "__attribute__((section(\"__DATA,bshield\")))\n")
fmt.Fprintf(w, "char symbols[] = {};\n")
fmt.Fprintf(w, "__attribute__((section(\"__DATA,bshield\")))\n")
fmt.Fprintf(w, "char symbols[] = {};\n")
fmt.Fprintf(w, "__attribute__((section(\"__DATA,bshield\")))\n")
fmt.Fprintf(w, "uint32_t encoded_table[] = {};\n")
fmt.Fprintf(w, "__attribute__((section(\"__DATA,bshield\")))\n")
fmt.Fprintf(w, "uint32_t encoded_table[] = {};\n")
fmt.Fprintf(w, "uint32_t n_instructions = %d;\n", 0)
}
fmt.Fprintf(w, "uint32_t n_instructions = %d;\n", 0)
}
fmt.Fprintf(w, "__attribute__((section(\"__DATA,bshield\")))\n")
fmt.Fprintf(w, "uint32_t special_selectors_idx[] = {\n")

View File

@ -46,7 +46,7 @@ func (printer *InfoPrinter) Print() {
)
}
mc.CollectObjectiveCClasses()
fmt.Println("======")
}

View File

@ -107,9 +107,9 @@ func (pc *ProgramContext) Process(ofile OFile) {
if pc.remove_exports {
pc.AddAction(NewRemoveExportsAction())
}
if pc.remove_string {
pc.AddAction(NewRemoveStringsAction())
}
ExperimentalFeature("Remove Unnecessary Info", func() {
if pc.remove_others {
pc.AddAction(NewRemoveUnnecessaryInfoAction())

View File

@ -27,8 +27,8 @@ type ImportSymbol struct {
file_address uint64
lib_ordinal uint32
target uint32
high8 uint32
// push number
pnum uint32
@ -131,9 +131,9 @@ func (mc *MachoContext) CollectBindSymbolsModern() []*ImportSymbol {
pages := ([]C.ushort)(unsafe.Slice(fix.pages, fix.page_count))
reader := bytes.NewReader(mc.buf)
for page_i := 0; page_i < int(fix.page_count); page_i++ {
// loop through each page in segment, each page has size fix.page_size
// the first item in page is offset through pages[page_i]
address := int64(fix.segment) + int64(page_i)*int64(fix.page_size) + int64(pages[page_i])
reader.Seek(address, io.SeekStart)
fmt.Printf(" page %d offset=%x\n", page_i, address)
@ -147,9 +147,9 @@ func (mc *MachoContext) CollectBindSymbolsModern() []*ImportSymbol {
var bind C.int
var ret1 C.ulonglong
var ret2 C.ulonglong
if fix.format != 2 && fix.format != 6 {
fmt.Printf("format is %d\n", fix.format)
}
next := C.ParseFixValue(C.int(fix.format), C.ulonglong(v),
&bind, &ret1, &ret2)
@ -175,8 +175,8 @@ func (mc *MachoContext) CollectBindSymbolsModern() []*ImportSymbol {
} else {
fmt.Printf("// 0x%x rebase=%d target=0x%x high8=0x%x\n", address, bind, ret1, ret2)
sym.typ = "rebase"
sym.target = uint32(ret1)
sym.high8 = uint32(ret2)
sym.segment = uint32(mc.findSegmentIndexAt(uint64(address)))
sym.file_address = uint64(address)
sym.next = int(next)
@ -194,7 +194,7 @@ func (mc *MachoContext) CollectBindSymbolsModern() []*ImportSymbol {
reader.Seek(0, io.SeekStart)
}
}
fmt.Printf("number of imports %d\n", len(syms))
fmt.Printf("number of imports %d\n", len(syms))
return syms
}
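The address arithmetic above follows the chained-fixups layout: each page's first fixup lives at segment offset + page index * page size + the 16-bit page start. A minimal Go sketch of that computation, assuming a struct shaped like dyld_chained_starts_in_segment (illustrative, not the cgo type used here):

type segmentStarts struct {
	SegmentOffset uint64   // file offset of the segment
	PageSize      uint16   // typically 0x4000
	PageStarts    []uint16 // offset of the first fixup inside each page
}

// firstFixupOffsets returns, for every page, the file offset the loop
// above seeks to; a real walker would also skip pages marked
// DYLD_CHAINED_PTR_START_NONE.
func firstFixupOffsets(s segmentStarts) []int64 {
	out := make([]int64, 0, len(s.PageStarts))
	for i, start := range s.PageStarts {
		out = append(out, int64(s.SegmentOffset)+int64(i)*int64(s.PageSize)+int64(start))
	}
	return out
}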

View File

@ -5,12 +5,12 @@ import (
"encoding/binary"
"fmt"
"io"
"os"
"math/rand"
"os"
"strings"
"time"
"unsafe"
"unsafe"
log "github.com/sirupsen/logrus"
@ -199,8 +199,8 @@ func (mc *MachoContext) RemoveUnnecessaryInfo() bool {
}
func (mc *MachoContext) RewriteHeader() {
var offset uint64
var start uint64
if mc.Is64bit() {
start = Header_size_64
@ -208,17 +208,17 @@ func (mc *MachoContext) RewriteHeader() {
start = Header_size
}
mc.file.Seek(0, io.SeekStart)
offset = start
for _, cmd := range mc.commands {
nwrite, _ := mc.file.WriteAt(cmd.Serialize(mc), int64(offset))
offset += uint64(nwrite)
}
mc.header.ncmds = uint32(len(mc.commands))
mc.header.sizeofcmds = uint32(offset - start)
mc.file.WriteAt(mc.header.Serialize(mc), 0)
}
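RewriteHeader lays the serialized load commands out immediately after the 64-bit Mach-O header and then patches the two counters inside it; a sketch of that 32-byte header layout (hence Header_size_64), not the project's own type:

// machHeader64 sketches the mach_header_64 at file offset 0.
// RewriteHeader only changes Ncmds and SizeofCmds; the load commands
// themselves are written back-to-back starting at offset 32.
type machHeader64 struct {
	Magic      uint32 // MH_MAGIC_64, 0xfeedfacf
	CPUType    uint32
	CPUSubtype uint32
	FileType   uint32
	Ncmds      uint32 // number of load commands following the header
	SizeofCmds uint32 // total size in bytes of those load commands
	Flags      uint32
	Reserved   uint32
}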
func (mc *MachoContext) AddLoadCmd(lcmd LoadCommand) {
@ -345,7 +345,7 @@ func (mc *MachoContext) RemoveBindSymbols() {
value := C.MakeRebaseFixupOpcode(C.int(symbol.next), C.ulonglong(target), C.ulonglong(high8))
v := make([]byte, 8)
mc.byteorder.PutUint64(v, uint64(value))
fmt.Printf("change to rebase at %x\n", symbol.file_address)
fmt.Printf("change to rebase at %x\n", symbol.file_address)
mc.file.WriteAt(v, int64(symbol.file_address))
}
}
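MakeRebaseFixupOpcode rewrites the 8-byte slot as a chained rebase. For the 64-bit pointer formats accepted by the format check in CollectBindSymbolsModern (2 and 6), dyld's fixup-chains.h packs a rebase as target:36, high8:8, reserved:7, next:12, bind:1; a Go sketch of that packing, as an illustration of the value being written rather than the cgo helper's actual source:

// packChained64Rebase packs a dyld_chained_ptr_64_rebase quadword:
// bits 0-35 target, 36-43 high8, 44-50 reserved, 51-62 next, 63 bind.
func packChained64Rebase(next, target, high8 uint64) uint64 {
	return target&((1<<36)-1) |
		(high8&0xff)<<36 |
		(next&0xfff)<<51 // bind bit (63) stays 0 to mark a rebase
}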
@ -565,39 +565,39 @@ func (mc *MachoContext) RemoveExportTrie() {
// this function breaks the file by adding another segment at
// the end of the file
func (mc *MachoContext) RemoveStrings() {
// add a new writable segment with a section
// loop over the instructions for adrp/add pairs;
// if the access points to the old cstring section, update it
// with new values from the new segment and section.
// data references, e.g., a pointer to a string, are compiled
// into rebase entries, so actively check for rebases and
// rewrite the rebase value to the new segment/section offset.
// save the strings into a file for recovery. should keep the linear
// format as before, or a customized order if we want something more complex.
// __LINKEDIT must be at the end of the file for the binary
// to be able to be resigned, so we have to move __LINKEDIT down.
// by how much? by the page-aligned size of the added segment.
// but __LINKEDIT also contains link-time data
// (fixups and bytecode chains), so their references have to be
// moved down too.
// symtab and dysymtab can be ignored, by removing them lmao
var cstring *Section64
for _, command := range mc.commands {
switch command.(type) {
case *Segment64:
var segment = command.(*Segment64)
if bytes.Compare(bytes.Trim(segment.SegName(), "\x00"), []byte("__TEXT")) == 0 {
for _, section := range segment.Sections() {
if bytes.Compare(bytes.Trim(section.SectName(), "\x00"), []byte("__cstring")) == 0 {
cstring = section.(*Section64)
}
}
}
continue
default:
@ -605,259 +605,257 @@ func (mc *MachoContext) RemoveStrings() {
}
}
if cstring == nil {
return
}
// get last segment, as the start point we extend from
// this assumes that the segments are ordered correctly:
// the first segment's offset is lower than the second segment's offset,
// and so on, so the last segment is the last part of the
// binary. Our new segment adds another part to the binary
// at the end.
// the last segment is always __LINKEDIT
last_segment := mc.Segments()[len(mc.Segments())-1]
fmt.Printf("last segment %v %s\n", last_segment, string(last_segment.SegName()))
// all data must be inside the segment (or in header)
// occupy the segment of linkedit and move linkedit down
segstart := last_segment.Vmaddr()
// segment must be page aligned, and the size is based on
// the section size
secstart := segstart
secsize := cstring.Size()
filestart := last_segment.Fileoff()
// align to page address, not sure if necessary, because the
// loader can pick up from anywhere and load in memory (mmap)
if filestart%0x4000 != 0 {
filestart += 0x4000 - (filestart % 0x4000)
}
fmt.Printf("section size %x\n", secsize)
secsize_aligned := uint64(0)
if secsize%0x4000 == 0 {
// very rare, but possible, it occupies whole pages
secsize_aligned = secsize
} else {
secsize_aligned = secsize + (0x4000 - (secsize % 0x4000))
}
filesize := secsize_aligned
segname := make([]byte, 16)
copy(segname, []byte("__BSHIELD"))
secname := make([]byte, 16)
copy(secname, []byte("__secrets"))
fmt.Printf("segstart %x\n", segstart)
fmt.Printf("file_start %x\n", filestart)
// size of the section and segment is defined by the total
// space for strings required
new_cstring_section := Section64{
sectname: secname,
segname: segname,
addr: secstart,
size: secsize,
offset: uint32(filestart),
align: 3, // idk, see AddSection
reloff: 0,
nreloc: 0,
flags: 0,
reserved1: 0,
reserved2: 0,
reserved3: 0,
}
string_segment := Segment64{
c: LoadCmd{cmd: LC_SEGMENT_64, cmdsize: 0},
segname: segname,
vmaddr: segstart,
vmsize: secsize_aligned,
fileoff: filestart,
filesize: filesize,
maxprot: 3, // read/write
initprot: 3, // read/write
nsects: 1,
flags: 0,
sections: []*Section64{&new_cstring_section},
}
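The 0x4000 arithmetic above is just rounding up to the 16 KiB page size used on arm64; the same align-up written once, as a sketch (no such helper exists in this file):

// alignUp rounds n up to the next multiple of align (a power of two,
// 0x4000 here); e.g. alignUp(0x4001, 0x4000) == 0x8000.
func alignUp(n, align uint64) uint64 {
	return (n + align - 1) &^ (align - 1)
}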
// rewrite the header to be correct
// mc.AddLoadCmd(&string_segment)
edit_segname := make([]byte, 16)
copy(edit_segname, []byte("__LINKEDIT"))
edit_segment := Segment64{
c: LoadCmd{cmd: LC_SEGMENT_64, cmdsize: 0},
segname: edit_segname,
vmaddr: segstart + secsize_aligned, // move down
vmsize: last_segment.Vmsize(),
fileoff: filestart + filesize,
filesize: last_segment.Filesize(),
maxprot: 1, // read only
initprot: 1, // read only
nsects: 0,
flags: 0,
sections: []*Section64{},
}
// modify the segment list
mc.segments[len(mc.segments)-1] = &string_segment
mc.segments = append(mc.segments, &edit_segment)
// modify the command list
for i, cmd := range mc.commands {
if cmd.(*Segment64) == last_segment.(*Segment64) {
mc.commands = append(mc.commands[:i+1], mc.commands[i:]...)
mc.commands[i] = &string_segment
mc.commands[i+1] = &edit_segment
break
}
}
// modify offset in other commands to use new link edit offset
edit_offset_migrate := func(file_offset uint64) uint64 {
// they should keep the old offset,
// but the base related to linkedit is modified
relative_offset := file_offset - last_segment.Fileoff()
return relative_offset + edit_segment.Fileoff()
}
for _, cmd := range mc.commands {
if lcmd, ok := cmd.(*LinkEdit); ok {
lcmd.dataoff = uint32(edit_offset_migrate(uint64(lcmd.dataoff)))
}
if lcmd, ok := cmd.(*Symtab); ok {
lcmd.stroff = uint32(edit_offset_migrate(uint64(lcmd.stroff)))
lcmd.symoff = uint32(edit_offset_migrate(uint64(lcmd.symoff)))
}
if lcmd, ok := cmd.(*DySymtab); ok {
lcmd.indirectsymoff = uint32(edit_offset_migrate(uint64(lcmd.indirectsymoff)))
}
}
mc.RewriteHeader()
tmp_file := mc.file.Name()
// has to reopen file as append
mc.file.Close()
mc.file, _ = os.OpenFile(tmp_file, os.O_RDWR|os.O_APPEND, 0644)
// make extra space
expected_end := edit_segment.Fileoff() + edit_segment.Filesize()
end, _ := mc.file.Seek(0, io.SeekEnd)
if end < int64(expected_end) {
mc.file.WriteAt(make([]byte, expected_end-uint64(end)), end)
}
// close and reopen as read/write, the buffer at the end is now empty
mc.file.Close()
mc.file, _ = os.OpenFile(tmp_file, os.O_RDWR, 0644)
// peek at old link edit and move down
old_linkedit := make([]byte, last_segment.Filesize())
mc.file.ReadAt(old_linkedit, int64(last_segment.Fileoff()))
mc.file.WriteAt(old_linkedit, int64(edit_segment.Fileoff()))
// prepare dummy bytes for the new string segment
// this is a way to divert reversing effort: fake (masked) strings go here
// and the real strings will be written again at runtime
dummy := make([]byte, edit_segment.Fileoff()-string_segment.Fileoff())
mc.file.ReadAt(dummy, int64(cstring.Offset()))
// copy(dummy, []byte("We R BShield\n"))
for i := 0; i < len(dummy); i++ {
dummy[i] = dummy[i] ^ 0x4f
}
mc.file.WriteAt(dummy, int64(string_segment.Fileoff()))
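The dummy copy written into the new segment is just the original cstring bytes XORed with 0x4f, so whatever restores them at runtime only has to apply the same single-byte mask again (XOR is its own inverse); a sketch of that inverse, with a made-up function name:

// unmaskStrings undoes the masking loop above: XOR with the same key
// (0x4f) recovers the original bytes.
func unmaskStrings(masked []byte) []byte {
	out := make([]byte, len(masked))
	for i, b := range masked {
		out[i] = b ^ 0x4f
	}
	return out
}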
// TODO: erase old strings
cstring_start := uint64(cstring.Offset())
random := make([]byte, cstring.Size())
rand.Read(random)
mc.file.WriteAt(random, int64(cstring_start))
// re-read internal buffer
last, _ := mc.file.Seek(0, io.SeekEnd)
mc.buf = make([]byte, last)
mc.file.Seek(0, io.SeekStart)
if _, err := io.ReadFull(mc.file, mc.buf); err != nil {
// panic?
}
// loop over __TEXT,__text and find all occurrences of (adrp, add)
// edit the offsets to point to the new region
// because adrp sets the register to the 0x1000-aligned page of its own address,
// e.g., an `adrp x0` instruction at 0x100003f70 yields x0 = 0x100003000
// technically, adrp can reach as far as 33 bits, roughly 4GB of memory,
// so we have plenty of headroom, because very few programs grow this far
// but if this happens, god bless you
// encoding ADRP is actually hard hmmge?
// this part uses file offsets for calculations
in_cstring := func(offset uint64) bool {
cstring_start := uint64(cstring.Offset())
cstring_end := cstring_start + cstring.Size()
return (offset >= cstring_start) && (offset < cstring_end)
}
text := mc.segments[1]
text_start := text.Fileoff()
text_end := text_start + text.Filesize()
inst := make([]byte, 4)
for addr := text_start; addr < text_end; addr = addr + 4 {
mc.file.ReadAt(inst, int64(addr))
inst_adrp := binary.LittleEndian.Uint32(inst)
mc.file.ReadAt(inst, int64(addr+4))
inst_add := binary.LittleEndian.Uint32(inst)
if !(C.is_adrp(C.uint(inst_adrp)) != 0 && C.is_add(C.uint(inst_add)) != 0) {
continue
}
base := (addr >> 12) << 12
// calculate the old string reference
ref_base := C.adrp_imm_get(C.uint(inst_adrp))
ref_offset := C.add_imm_get(C.uint(inst_add))
ref := base + uint64(ref_base+ref_offset)
if !in_cstring(ref) {
continue
}
oldstr := uint64(ref)
oldstr_relative := oldstr - uint64(cstring.Offset())
// find the new string address
// using oldstr relative address to cstring section
newstr := uint64(new_cstring_section.Offset()) + oldstr_relative
newstr_base := (newstr >> 12) << 12 // to calculate new offset in adrp
newstr_offset := newstr - newstr_base // to calculate new offset in add
C.adrp_imm_set((*C.uint32_t)(unsafe.Pointer(&inst_adrp)), C.uint(newstr_base-base))
C.add_imm_set((*C.uint32_t)(unsafe.Pointer(&inst_add)), C.uint(newstr_offset))
binary.LittleEndian.PutUint32(inst, inst_adrp)
mc.file.WriteAt(inst, int64(addr))
binary.LittleEndian.PutUint32(inst, inst_add)
mc.file.WriteAt(inst, int64(addr+4))
}
// modify the rebase table (for both opcode and fixups chain versions)
// this is for pointer references
isModernSymbol := mc.dyldinfo == nil
isLegacySymbol := !isModernSymbol
@ -866,21 +864,21 @@ func (mc *MachoContext) RemoveStrings() {
} else {
// (high8 << 56 | target) - mach_header
ref := uint64(symbol.high8)<<56 | uint64(symbol.target)
if !in_cstring(ref) {
continue
}
oldstr := ref
oldstr_relative := oldstr - uint64(cstring.Offset())
newstr := uint64(new_cstring_section.Offset()) + oldstr_relative
target := newstr & 0x00FFFFFFFFFFFFFF
high8 := newstr >> 56
value := C.MakeRebaseFixupOpcode(C.int(symbol.next), C.ulonglong(target), C.ulonglong(high8))
v := make([]byte, 8)
mc.byteorder.PutUint64(v, uint64(value))
fmt.Printf("string rebase change at %x\n", symbol.file_address)
fmt.Printf("string rebase change at %x\n", symbol.file_address)
mc.file.WriteAt(v, int64(symbol.file_address))
}
}
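On the adrp/add rewriting above: the adrp immediate is a signed page delta in 4 KiB units split across immlo (bits 30-29) and immhi (bits 23-5), while the add immediate is a plain 12-bit field at bits 21-10. A sketch of what adrp_imm_set/add_imm_set have to do, assuming the standard A64 encodings rather than the C helpers' actual implementation:

// setADRPImm writes a byte delta between 4 KiB pages into an adrp
// instruction; the delta must be a multiple of 0x1000.
func setADRPImm(inst uint32, pageDelta int64) uint32 {
	imm := uint32(pageDelta>>12) & 0x1fffff // 21-bit signed page count
	inst &^= 0x3<<29 | 0x7ffff<<5
	return inst | (imm&0x3)<<29 | (imm>>2)<<5
}

// setADDImm writes the low 12 bits of the target offset into an
// add-immediate instruction (imm12 sits at bits 21-10).
func setADDImm(inst uint32, off uint32) uint32 {
	inst &^= 0xfff << 10
	return inst | (off&0xfff)<<10
}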

View File

@ -6,7 +6,7 @@ import (
"fmt"
"io"
"strings"
"unsafe"
"unsafe"
. "ios-wrapper/pkg/ios"
)
@ -15,10 +15,10 @@ import (
import "C"
func (mc *MachoContext) CollectObjectiveCClasses() {
var objc_const *bytes.Reader
var objc_const_start uint64
var objc_const_end uint64
// var objc_methname []byte
for _, cmd := range mc.commands {
if cmd.Cmd() == LC_MAIN {
@ -29,18 +29,18 @@ func (mc *MachoContext) CollectObjectiveCClasses() {
}
var segment = cmd.(*Segment64)
// we assume the binary comes in perfect ordering, that is as laid out below
if bytes.Compare(bytes.Trim(segment.SegName(), "\x00"), []byte("__TEXT")) == 0 {
for _, section := range segment.Sections() {
buffer := make([]byte, section.Size())
mc.file.ReadAt(buffer, int64(section.Offset()))
if bytes.Compare(bytes.Trim(section.SectName(), "\x00"), []byte("__objc_stubs")) == 0 {
}
if bytes.Compare(bytes.Trim(section.SectName(), "\x00"), []byte("__objc_methlist")) == 0 {
}
if bytes.Compare(bytes.Trim(section.SectName(), "\x00"), []byte("__objc_methname")) == 0 {
// objc_methname := buffer
}
if bytes.Compare(bytes.Trim(section.SectName(), "\x00"), []byte("__objc_classname")) == 0 {
}
@ -50,8 +50,8 @@ func (mc *MachoContext) CollectObjectiveCClasses() {
}
if bytes.Compare(bytes.Trim(segment.SegName(), "\x00"), []byte("__DATA_CONST")) == 0 {
for _, section := range segment.Sections() {
buffer := make([]byte, section.Size())
mc.file.ReadAt(buffer, int64(section.Offset()))
if bytes.Compare(bytes.Trim(section.SectName(), "\x00"), []byte("__objc_classlist")) == 0 {
}
if bytes.Compare(bytes.Trim(section.SectName(), "\x00"), []byte("__objc_nlclslist")) == 0 {
@ -59,16 +59,16 @@ func (mc *MachoContext) CollectObjectiveCClasses() {
if bytes.Compare(bytes.Trim(section.SectName(), "\x00"), []byte("__objc_imageinfo")) == 0 {
}
}
}
if bytes.Compare(bytes.Trim(segment.SegName(), "\x00"), []byte("__DATA")) == 0 {
for _, section := range segment.Sections() {
buffer := make([]byte, section.Size())
mc.file.ReadAt(buffer, int64(section.Offset()))
reader := bytes.NewReader(buffer)
if bytes.Compare(bytes.Trim(section.SectName(), "\x00"), []byte("__objc_const")) == 0 {
objc_const = reader
objc_const_start = uint64(section.Offset())
objc_const_end = objc_const_start + section.Size()
}
if bytes.Compare(bytes.Trim(section.SectName(), "\x00"), []byte("__objc_selrefs")) == 0 {
}
@ -77,90 +77,89 @@ func (mc *MachoContext) CollectObjectiveCClasses() {
if bytes.Compare(bytes.Trim(section.SectName(), "\x00"), []byte("__objc_superrefs")) == 0 {
}
if bytes.Compare(bytes.Trim(section.SectName(), "\x00"), []byte("__objc_data")) == 0 {
// this section contains a series of class_t
// struct _class_t {
// struct _class_t *isa;
// struct _class_t * const superclass;
// void *cache;
// IMP *vtable;
// struct class_ro_t *ro;
// };
for i := uint64(0); i < (section.Size() / uint64(mc.pointersize*5)); i++ {
var isa uint64
var superclass uint64
var cache uint64
var vtable uint64
var ro uint64
binary.Read(reader, mc.byteorder, &isa)
binary.Read(reader, mc.byteorder, &superclass)
binary.Read(reader, mc.byteorder, &cache)
binary.Read(reader, mc.byteorder, &vtable)
binary.Read(reader, mc.byteorder, &ro)
fmt.Printf("at=0x%x\n", section.Offset()+uint32(i)*mc.pointersize*5)
fmt.Printf("isa=0x%x superclass=0x%x\n", isa, superclass)
fmt.Printf("cache=0x%x vtable=0x%x\n", cache, vtable)
fmt.Printf("ro=0x%x\n", ro)
fmt.Printf("at=0x%x\n", section.Offset() + uint32(i) * mc.pointersize * 5)
fmt.Printf("isa=0x%x superclass=0x%x\n", isa, superclass)
fmt.Printf("cache=0x%x vtable=0x%x\n", cache, vtable)
fmt.Printf("ro=0x%x\n", ro)
var bind int
var ret1 uint64
var ret2 uint64
C.ParseFixValue(C.int(2), C.uint64_t(ro),
(*C.int)(unsafe.Pointer(&bind)),
(*C.uint64_t)(unsafe.Pointer(&ret1)),
(*C.uint64_t)(unsafe.Pointer(&ret2)),
)
// is rebase, because ro points to objc_const
// and address is in range
if bind != 1 && ret1 >= objc_const_start && ret1 < objc_const_end {
offset := ret1 - objc_const_start
objc_const.Seek(int64(offset), 0)
// struct _class_ro_t {
// uint32_t const flags;
// uint32_t const instanceStart;
// uint32_t const instanceSize;
// uint32_t const reserved; // only when building for 64bit targets
// const uint8_t * const ivarLayout;
// const char *const name;
// const struct _method_list_t * const baseMethods;
// const struct _protocol_list_t *const baseProtocols;
// const struct _ivar_list_t *const ivars;
// const uint8_t * const weakIvarLayout;
// const struct _prop_list_t * const properties;
// };
var tmp uint32
var ivarLayout uint64 // ptr
var name uint64 // ptr
var baseMethods uint64 // ptr
var baseProtocols uint64 // ptr
var ivars uint64 // ptr
var weakIvarLayout uint64 // ptr
var properties uint64 // ptr
binary.Read(objc_const, mc.byteorder, &tmp)
binary.Read(objc_const, mc.byteorder, &tmp)
binary.Read(objc_const, mc.byteorder, &tmp)
binary.Read(objc_const, mc.byteorder, &tmp)
binary.Read(objc_const, mc.byteorder, &ivarLayout)
binary.Read(objc_const, mc.byteorder, &name)
binary.Read(objc_const, mc.byteorder, &baseMethods)
binary.Read(objc_const, mc.byteorder, &baseProtocols)
binary.Read(objc_const, mc.byteorder, &ivars)
binary.Read(objc_const, mc.byteorder, &weakIvarLayout)
binary.Read(objc_const, mc.byteorder, &properties)
fmt.Printf("method list: %x\n", baseMethods)
}
fmt.Printf("========\n")
}
fmt.Printf("method list: %x\n", baseMethods)
}
fmt.Printf("========\n")
}
}
}
}
}
}
}
}
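The field-by-field binary.Read calls above mirror the class_ro_t layout given in the comment; the same read can be expressed once with a struct, shown here as an illustrative alternative (using the encoding/binary and io imports already present in this file), not code from this commit:

// classRO64 mirrors the 64-bit class_ro_t layout documented above;
// one binary.Read call keeps the field order in a single place.
type classRO64 struct {
	Flags, InstanceStart, InstanceSize, Reserved uint32
	IvarLayout, Name, BaseMethods, BaseProtocols uint64
	Ivars, WeakIvarLayout, Properties            uint64
}

// readClassRO consumes 4*4 + 7*8 = 72 bytes from r.
func readClassRO(r io.Reader, order binary.ByteOrder) (classRO64, error) {
	var ro classRO64
	err := binary.Read(r, order, &ro)
	return ro, err
}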
@ -482,7 +481,7 @@ func (mc *MachoContext) ReworkForObjc() {
}
}
encode_movz((data_end - text_start) + (shellcode_size - len(shellcode_start)) + 3)
shellcode_offset = text_start - shellcode_size
shellcode_bytes := append(shellcode_start, offset...)
@ -519,7 +518,7 @@ func (mc *MachoContext) ReworkForObjc() {
offset += 4
}
// make __TEXT writable lol
mc.file.Seek(0, 0)
mc.file.WriteAt([]byte{0x7}, 0xa0)
}