add old go tooling

2023-05-31 16:17:03 +07:00
commit 54f1f3eb38
57 changed files with 5791 additions and 0 deletions


@@ -0,0 +1,184 @@
/// Contains the declarations of FatHeader and FatArch.
/// These structs are always written in big-endian byte order,
/// as documented in mach-o/fat.h.
/// This file also provides a CreateFat function to generate
/// a Fat file from a list of MachoContext.
package fat
import (
"io"
"os"
"sort"
log "github.com/sirupsen/logrus"
. "ios-wrapper/pkg/ios"
macho "ios-wrapper/pkg/ios/macho"
)
/// Get the alignment for a Mach-O slice in a Fat binary.
/// The returned value is the exponent of the power-of-two alignment (log2).
func GetAlignment(h *macho.Header) uint32 {
switch h.Cputype() {
case CPU_TYPE_ARM, CPU_TYPE_ARM64:
return 0xe // log2(0x4000)
case CPU_TYPE_POWERPC,
CPU_TYPE_POWERPC64,
CPU_TYPE_I386,
CPU_TYPE_X86_64:
return 0xc // log2(0x1000)
default:
return 0xd // log2(0x2000)
}
}
func MachosToFatArchs(machos []*macho.MachoContext) []*FatArch {
var fa []*FatArch
for _, m := range machos {
fa = append(fa, &FatArch{
Cputype: m.Header().Cputype(),
Cpusubtype: m.Header().Cpusubtype(),
Size: m.FileSize(),
Offset: 0,
Align: GetAlignment(m.Header()),
})
}
return fa
}
/// Create a Fat binary from a list of MachoContext:
/// Convert each MachoContext to a FatArch.
/// Calculate the alignment; a simple lookup is enough for now
/// because iOS binaries always use the ARM architectures.
///
/// Sort the Fat archs the same way cctools/lipo does.
/// Calculate the offset of each Mach-O slice.
///
/// Write the FatHeader.
/// Write the FatArchs.
/// Write the Mach-Os.
func CreateFat(machos []*macho.MachoContext, outfilename string) error {
archs := MachosToFatArchs(machos)
sort.SliceStable(archs, func(i, j int) bool {
if archs[i].Cputype == archs[j].Cputype {
return archs[i].Cpusubtype < archs[j].Cpusubtype
}
if archs[i].Cputype == CPU_TYPE_ARM64 {
return false
}
if archs[j].Cputype == CPU_TYPE_ARM64 {
return true
}
return archs[i].Cpusubtype < archs[j].Cpusubtype
})
// calculate the offset for each FatArch
offset := uint32(8) // size of fat_header (magic + nfat_arch), bytes
// offset to the first Mach-O slice
offset += uint32(len(archs)) * 20 // size of each fat_arch (5 uint32 fields), bytes
for _, arch := range archs {
offset = uint32(OffsetRounder(uint64(offset), 1<<arch.Align))
arch.Offset = offset
offset += arch.Size
log.WithFields(log.Fields{
"cputype": arch.Cputype,
"cpusubtype": arch.Cpusubtype,
"size": arch.Size,
"offset": arch.Offset,
"algin": arch.Align,
}).Debug("Arch to add")
}
file, err := os.OpenFile(outfilename, os.O_CREATE|os.O_WRONLY, 0777)
if err != nil {
return err
}
defer file.Close()
var w fatWriter
w.WriteFatHeader(file, &FatHeader{
Magic: MagicFat,
Nfat_arch: uint32(len(archs)),
})
w.WriteFatArchs(file, archs)
w.WriteMachos(file, archs, machos)
return w.err
}
type fatWriter struct {
err error
}
func (fw *fatWriter) WriteFatHeader(w io.Writer, h *FatHeader) {
if fw.err != nil {
return
}
b := h.Serialize()
_, err := w.Write(b)
fw.err = err
}
func (fw *fatWriter) WriteFatArchs(w io.Writer, archs []*FatArch) {
for _, arch := range archs {
if fw.err != nil {
return
}
b := arch.Serialize()
_, err := w.Write(b)
fw.err = err
log.WithFields(log.Fields{
"cputype": arch.Cputype,
"cpusubtype": arch.Cpusubtype,
"size": arch.Size,
"offset": arch.Offset,
"algin": arch.Align,
}).Info("Attempt to write Fat Arch")
}
}
/// For each sorted fat arch, find the matching MachoContext
/// (by cputype and cpusubtype) and write its buffer to the output
/// at the arch's offset.
func (fw *fatWriter) WriteMachos(
w io.WriteSeeker,
archs []*FatArch,
machos []*macho.MachoContext,
) {
for _, arch := range archs {
for _, m := range machos { // avoid shadowing the macho package
if fw.err != nil {
return
}
cputype := m.Header().Cputype()
cpusubtype := m.Header().Cpusubtype()
if cputype == arch.Cputype &&
cpusubtype == arch.Cpusubtype {
log.WithFields(log.Fields{
"cputype": cputype,
"cpusubtype": cpusubtype,
"offset": arch.Offset,
"size": arch.Size,
}).Info("Attempt to write Mach-O to file")
_, err1 := w.Seek(int64(arch.Offset), io.SeekStart)
_, err2 := m.WriteBufferTo(w)
if err1 != nil {
fw.err = err1
} else if err2 != nil {
fw.err = err2
}
}
}
}
}
/// Round v up to the next multiple of r; r must be a power of two.
func OffsetRounder(v, r uint64) uint64 {
r--
v += r
v &= uint64(^int64(r))
return v
}
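
To make the offset arithmetic in CreateFat concrete, here is a minimal stand-alone sketch (not part of this commit; the slice sizes are made up) that mirrors OffsetRounder and walks the same calculation for a hypothetical two-slice ARM fat binary:

package main

import "fmt"

// roundUp mirrors OffsetRounder: round v up to the next multiple of r (a power of two).
func roundUp(v, r uint64) uint64 {
	r--
	return (v + r) &^ r
}

func main() {
	// Hypothetical two-slice fat binary; sizes are made up for illustration.
	sizes := []uint64{0x12345, 0x23456}
	align := uint64(14) // GetAlignment returns 0xe for ARM/ARM64, i.e. 16 KiB alignment

	offset := uint64(8)               // fat_header: magic + nfat_arch
	offset += uint64(len(sizes)) * 20 // one 20-byte fat_arch entry per slice
	for i, size := range sizes {
		offset = roundUp(offset, 1<<align)
		fmt.Printf("slice %d: offset=%#x size=%#x\n", i, offset, size)
		offset += size
	}
	// slice 0: offset=0x4000 size=0x12345
	// slice 1: offset=0x18000 size=0x23456
}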

macho-go/pkg/ios/fat/fat.go

@@ -0,0 +1,110 @@
package fat
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"io"
"os"
. "ios-wrapper/pkg/ios"
"ios-wrapper/pkg/ios/macho"
)
type FatHeader struct {
Magic uint32
Nfat_arch uint32
}
func (h *FatHeader) Serialize() []byte {
buf := new(bytes.Buffer)
binary.Write(buf, binary.BigEndian, h.Magic)
binary.Write(buf, binary.BigEndian, h.Nfat_arch)
return buf.Bytes()
}
type FatArch struct {
Cputype uint32
Cpusubtype uint32
Offset uint32
Size uint32
Align uint32
}
func (arch *FatArch) Serialize() []byte {
buf := new(bytes.Buffer)
binary.Write(buf, binary.BigEndian, arch.Cputype)
binary.Write(buf, binary.BigEndian, arch.Cpusubtype)
binary.Write(buf, binary.BigEndian, arch.Offset)
binary.Write(buf, binary.BigEndian, arch.Size)
binary.Write(buf, binary.BigEndian, arch.Align)
return buf.Bytes()
}
// FatArch64 appears to be needed only when a slice's size or offset
// exceeds 4 GB; otherwise FatArch is used.
type FatArch64 struct {
Cputype uint32
Cpusubtype uint32
Offset uint64
Size uint64
Align uint32
}
type FatContext struct {
fatArch []*FatArch
}
func (fc *FatContext) ParseFile(file *os.File) {
r := bufio.NewReader(file)
{ // read magic to define byteorder and pointersize
var magic uint32
magic_buf := make([]byte, 4)
io.ReadFull(r, magic_buf) // make sure all 4 magic bytes are read
magic_r := bytes.NewReader(magic_buf)
binary.Read(magic_r, binary.BigEndian, &magic)
if magic != MagicFat {
return
}
}
var nfat_arch uint32
binary.Read(r, binary.BigEndian, &nfat_arch)
for i := uint32(0); i < nfat_arch; i++ {
var fat_arch FatArch
binary.Read(r, binary.BigEndian, &fat_arch.Cputype)
binary.Read(r, binary.BigEndian, &fat_arch.Cpusubtype)
binary.Read(r, binary.BigEndian, &fat_arch.Offset)
binary.Read(r, binary.BigEndian, &fat_arch.Size)
binary.Read(r, binary.BigEndian, &fat_arch.Align)
fc.fatArch = append(fc.fatArch, &fat_arch)
}
}
func (fc *FatContext) Machos() []*FatArch {
return fc.fatArch
}
func FatSplit(path string) ([]string, error) {
return CreateMachosFromFat(path)
}
// Parse files into Mach-O, calculate fat_arch and join
// into a Fat binary
// @paths: paths to Mach-O binaries
// @out: output Fat file path
// returns nil on success, or the first error encountered
func FatJoin(paths []string, out string) error {
machos, err := macho.MachosFromFiles(paths)
if err != nil {
return err
}
if macho.CheckDuplicateArch(machos) {
return errors.New("Duplicate Arch")
}
return CreateFat(machos, out)
}
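
For reference, a minimal caller of FatJoin and FatSplit might look like the sketch below (not part of this commit; the input and output file names are hypothetical, and the import path assumes the ios-wrapper module layout used by the imports above):

package main

import (
	"fmt"
	"log"

	"ios-wrapper/pkg/ios/fat"
)

func main() {
	// Join two thin Mach-O slices into a single fat binary.
	if err := fat.FatJoin([]string{"app_armv7", "app_arm64"}, "app_fat"); err != nil {
		log.Fatal(err)
	}

	// Split the fat binary back into one file per architecture.
	slices, err := fat.FatSplit("app_fat")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("split into:", slices)
}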


@@ -0,0 +1,62 @@
package fat
import (
"fmt"
"io"
"os"
"strings"
)
// Split a Fat binary into multiple Mach-O binaries
// @ifile: path to Fat binary
// returns a list of file paths of the split Mach-O binaries
func CreateMachosFromFat(ifile string) ([]string, error) {
f, err := os.OpenFile(ifile, os.O_RDONLY, 0644)
if err != nil {
fmt.Println("Cannot open file")
return []string{}, err
}
defer f.Close()
var fc FatContext
fc.ParseFile(f)
var r []string
for _, arch := range fc.Machos() {
offset := arch.Offset
size := arch.Size
buf := make([]byte, size)
filename := fmt.Sprintf(
"%s_%x",
strings.ReplaceAll(ifile, " ", "_"),
offset,
)
var err error
_, err = f.Seek(int64(offset), io.SeekStart)
if err != nil {
return r, err
}
_, err = io.ReadFull(f, buf)
if err != nil {
return r, err
}
outfile, err := os.OpenFile(
filename,
os.O_CREATE|os.O_WRONLY,
0777,
)
if err != nil {
return r, err
}
_, err = outfile.Write(buf)
outfile.Close()
if err != nil {
return r, err
}
r = append(r, filename) // everything is fine, append the new file path
}
return r, nil
}
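
CreateMachosFromFat names each output file after the input path plus the slice offset in hex, so a quick sanity check is to read back the first four bytes of each produced file and confirm they hold a thin Mach-O magic rather than the fat magic. A minimal sketch, assuming hypothetical output file names:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	// Hypothetical outputs of CreateMachosFromFat("app_fat"): "<input>_<offset in hex>".
	for _, path := range []string{"app_fat_4000", "app_fat_18000"} {
		f, err := os.Open(path)
		if err != nil {
			fmt.Println(err)
			continue
		}
		magic := make([]byte, 4)
		_, err = io.ReadFull(f, magic)
		f.Close()
		if err != nil {
			fmt.Println(err)
			continue
		}
		// A thin 64-bit Mach-O written by a little-endian host starts with cf fa ed fe;
		// ca fe ba be at the start would mean the file is still a fat binary.
		fmt.Printf("%s: % x\n", path, magic)
	}
}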