initial
This commit is contained in:
215
pakUtils.go
Normal file
215
pakUtils.go
Normal file
@@ -0,0 +1,215 @@
|
||||
package reforger_utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func readString(r io.Reader, expected string) error {
|
||||
buf := make([]byte, 4)
|
||||
if _, err := io.ReadFull(r, buf); err != nil {
|
||||
return fmt.Errorf("failed to read identifier: %w", err)
|
||||
}
|
||||
if string(buf) != expected {
|
||||
return fmt.Errorf("expected identifier '%s', but got '%s'", expected, string(buf))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseEntry(r *bytes.Reader) (PakEntry, error) {
|
||||
var pakEntry PakEntry
|
||||
if err := binary.Read(r, binary.LittleEndian, &pakEntry.Offset); err != nil {
|
||||
return PakEntry{}, fmt.Errorf("failed to read file offset: %w", err)
|
||||
}
|
||||
|
||||
if err := binary.Read(r, binary.LittleEndian, &pakEntry.Size); err != nil {
|
||||
return PakEntry{}, fmt.Errorf("failed to read file size: %w", err)
|
||||
}
|
||||
|
||||
if err := binary.Read(r, binary.LittleEndian, &pakEntry.OriginalSize); err != nil {
|
||||
return PakEntry{}, fmt.Errorf("failed to read uncompressed size: %w", err)
|
||||
}
|
||||
|
||||
r.Seek(4, io.SeekCurrent) // NULL
|
||||
|
||||
if err := binary.Read(r, binary.BigEndian, &pakEntry.CompressType); err != nil {
|
||||
return PakEntry{}, fmt.Errorf("failed to read compression type: %w", err)
|
||||
}
|
||||
|
||||
if err := binary.Read(r, binary.BigEndian, &pakEntry.UnknownData); err != nil {
|
||||
return PakEntry{}, fmt.Errorf("failed to read unknown data: %w", err)
|
||||
}
|
||||
|
||||
return pakEntry, nil
|
||||
}
|
||||
|
||||
func parseEntries(r *bytes.Reader, dataBlockLength int64) ([]PakEntry, error) {
|
||||
var entries []PakEntry
|
||||
read := 0
|
||||
|
||||
for r.Len() > 0 {
|
||||
// read byte to determine entry type
|
||||
entryType, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read entry type: %w", err)
|
||||
}
|
||||
|
||||
nameLength, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read name length: %w", err)
|
||||
}
|
||||
|
||||
name := make([]byte, nameLength)
|
||||
if _, err := io.ReadFull(r, name); err != nil {
|
||||
return nil, fmt.Errorf("failed to read name: %w", err)
|
||||
}
|
||||
|
||||
switch entryType {
|
||||
case 0x00: // Directory
|
||||
ent, size, err := parseDirectory(r, string(name), dataBlockLength)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse directory '%s': %w", string(name), err)
|
||||
}
|
||||
read += 1 + 1 + int(nameLength) + size
|
||||
entries = append(entries, ent...)
|
||||
case 0x01: // File
|
||||
pakEntry, err := parseEntry(r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse entry for file '%s': %w", string(name), err)
|
||||
}
|
||||
|
||||
pakEntry.Name = string(name)
|
||||
|
||||
entries = append(entries, pakEntry)
|
||||
read += 1 + 1 + int(nameLength) + 4 + 4 + 4 + 4 + 4 + 4 // entryType + nameLength + name + offset + size + originalSize + NULL + compressType + unknownData
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown entry type: %d", entryType)
|
||||
}
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
func parseDirectory(r *bytes.Reader, path string, dataBlockOffset int64) ([]PakEntry, int, error) {
|
||||
var entries []PakEntry
|
||||
var totalSize int
|
||||
|
||||
// read the number of entries in the directory
|
||||
var entryCount uint32
|
||||
if err := binary.Read(r, binary.LittleEndian, &entryCount); err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to read entry count for directory '%s': %w", path, err)
|
||||
}
|
||||
|
||||
totalSize += 4
|
||||
|
||||
for i := 0; i < int(entryCount); i++ {
|
||||
// read byte to determine entry type
|
||||
entryType, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to read entry type: %w", err)
|
||||
}
|
||||
|
||||
nameLength, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to read name length: %w", err)
|
||||
}
|
||||
|
||||
name := make([]byte, nameLength)
|
||||
if _, err := io.ReadFull(r, name); err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to read name: %w", err)
|
||||
}
|
||||
|
||||
totalSize += 1 + 1 + int(nameLength)
|
||||
|
||||
switch entryType {
|
||||
case 0x00: // Directory
|
||||
subEntries, size, err := parseDirectory(r, path+"/"+string(name), dataBlockOffset)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to parse directory '%s': %w", path+"/"+string(name), err)
|
||||
}
|
||||
entries = append(entries, subEntries...)
|
||||
totalSize += size
|
||||
case 0x01: // File
|
||||
pakEntry, err := parseEntry(r)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to parse entry for file '%s': %w", path+"/"+string(name), err)
|
||||
}
|
||||
pakEntry.Name = path + "/" + string(name)
|
||||
entries = append(entries, pakEntry)
|
||||
totalSize += 4 + 4 + 4 + 4 + 4 + 4
|
||||
}
|
||||
}
|
||||
|
||||
return entries, totalSize, nil
|
||||
}
|
||||
|
||||
// GetPakFileInfo parses a PAK archive held entirely in memory and returns
// the file entries described by its FILE chunk.
//
// The archive is an IFF-style container: a "FORM" tag, a 4-byte total size
// (skipped), a "PAC1" tag, then a sequence of chunks, each with a 4-byte ID
// and a big-endian uint32 size. The DATA chunk's start offset is recorded
// (file offsets in entries are relative to it) before the FILE chunk is
// parsed; a FILE chunk seen before DATA is rejected.
//
// Returns an error if the headers are malformed, if a chunk cannot be read,
// or if no FILE chunk is present.
func GetPakFileInfo(data []byte) ([]PakEntry, error) {
	r := bytes.NewReader(data)

	// Read FORM header
	if err := readString(r, "FORM"); err != nil {
		return nil, err
	}
	r.Seek(4, io.SeekCurrent) // Skip total file size

	// Read PAC1 header
	if err := readString(r, "PAC1"); err != nil {
		return nil, err
	}

	// -1 is the sentinel for "DATA chunk not yet seen".
	var dataBlockOffset int64 = -1

	// Find DATA and FILE chunks
	for {
		chunkIDBytes := make([]byte, 4)
		_, err := io.ReadFull(r, chunkIDBytes)
		if err == io.EOF {
			// Clean end of archive: fall through to the "not found" error.
			break
		}
		if err != nil {
			return nil, fmt.Errorf("failed to read chunk ID: %w", err)
		}
		chunkID := string(chunkIDBytes)

		// Chunk sizes are big-endian, unlike the little-endian entry fields.
		var chunkSize uint32
		if err := binary.Read(r, binary.BigEndian, &chunkSize); err != nil {
			return nil, fmt.Errorf("failed to read chunk size for %s: %w", chunkID, err)
		}

		switch chunkID {
		case "HEAD":
			// Header contents are not needed; skip the payload.
			r.Seek(int64(chunkSize), io.SeekCurrent)
		case "DATA":
			// The file offsets are relative to the start of the data within this block.
			// So we record the start position of the data block.
			dataBlockOffset, err = r.Seek(0, io.SeekCurrent)
			if err != nil {
				return nil, fmt.Errorf("could not get DATA block offset: %w", err)
			}
			r.Seek(int64(chunkSize), io.SeekCurrent)
		case "FILE":
			if dataBlockOffset == -1 {
				return nil, fmt.Errorf("FILE chunk found before DATA chunk")
			}

			// skip unknown data (6 bytes of unidentified header — purpose
			// unknown, presumably format metadata; TODO confirm)
			r.Seek(6, io.SeekCurrent)

			entries, err := parseEntries(r, dataBlockOffset)

			if err != nil {
				return nil, fmt.Errorf("failed to parse FILE entries: %w", err)
			}

			// Parsing stops at the first FILE chunk; any trailing chunks
			// after it are never examined.
			return entries, nil
		default:
			// Skip unknown chunks
			r.Seek(int64(chunkSize), io.SeekCurrent)
		}
	}

	return nil, fmt.Errorf("FILE chunk not found in archive")
}
|
||||
Reference in New Issue
Block a user