initial
.gitignore (vendored) · Normal file · +1
@@ -0,0 +1 @@
downloads/*
go.mod · Normal file · +3
@@ -0,0 +1,3 @@
module gitea.tbdevent.eu/ilbinek/reforger_utils

go 1.25.0
pakUtils.go · Normal file · +215
@@ -0,0 +1,215 @@
package reforger_utils

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

func readString(r io.Reader, expected string) error {
	buf := make([]byte, 4)
	if _, err := io.ReadFull(r, buf); err != nil {
		return fmt.Errorf("failed to read identifier: %w", err)
	}
	if string(buf) != expected {
		return fmt.Errorf("expected identifier '%s', but got '%s'", expected, string(buf))
	}
	return nil
}

func parseEntry(r *bytes.Reader) (PakEntry, error) {
	var pakEntry PakEntry
	if err := binary.Read(r, binary.LittleEndian, &pakEntry.Offset); err != nil {
		return PakEntry{}, fmt.Errorf("failed to read file offset: %w", err)
	}

	if err := binary.Read(r, binary.LittleEndian, &pakEntry.Size); err != nil {
		return PakEntry{}, fmt.Errorf("failed to read file size: %w", err)
	}

	if err := binary.Read(r, binary.LittleEndian, &pakEntry.OriginalSize); err != nil {
		return PakEntry{}, fmt.Errorf("failed to read uncompressed size: %w", err)
	}

	r.Seek(4, io.SeekCurrent) // NULL

	if err := binary.Read(r, binary.BigEndian, &pakEntry.CompressType); err != nil {
		return PakEntry{}, fmt.Errorf("failed to read compression type: %w", err)
	}

	if err := binary.Read(r, binary.BigEndian, &pakEntry.UnknownData); err != nil {
		return PakEntry{}, fmt.Errorf("failed to read unknown data: %w", err)
	}

	return pakEntry, nil
}

func parseEntries(r *bytes.Reader, dataBlockLength int64) ([]PakEntry, error) {
	var entries []PakEntry
	read := 0

	for r.Len() > 0 {
		// read byte to determine entry type
		entryType, err := r.ReadByte()
		if err != nil {
			return nil, fmt.Errorf("failed to read entry type: %w", err)
		}

		nameLength, err := r.ReadByte()
		if err != nil {
			return nil, fmt.Errorf("failed to read name length: %w", err)
		}

		name := make([]byte, nameLength)
		if _, err := io.ReadFull(r, name); err != nil {
			return nil, fmt.Errorf("failed to read name: %w", err)
		}

		switch entryType {
		case 0x00: // Directory
			ent, size, err := parseDirectory(r, string(name), dataBlockLength)
			if err != nil {
				return nil, fmt.Errorf("failed to parse directory '%s': %w", string(name), err)
			}
			read += 1 + 1 + int(nameLength) + size
			entries = append(entries, ent...)
		case 0x01: // File
			pakEntry, err := parseEntry(r)
			if err != nil {
				return nil, fmt.Errorf("failed to parse entry for file '%s': %w", string(name), err)
			}

			pakEntry.Name = string(name)

			entries = append(entries, pakEntry)
			read += 1 + 1 + int(nameLength) + 4 + 4 + 4 + 4 + 4 + 4 // entryType + nameLength + name + offset + size + originalSize + NULL + compressType + unknownData

		default:
			return nil, fmt.Errorf("unknown entry type: %d", entryType)
		}
	}

	return entries, nil
}

func parseDirectory(r *bytes.Reader, path string, dataBlockOffset int64) ([]PakEntry, int, error) {
	var entries []PakEntry
	var totalSize int

	// read the number of entries in the directory
	var entryCount uint32
	if err := binary.Read(r, binary.LittleEndian, &entryCount); err != nil {
		return nil, 0, fmt.Errorf("failed to read entry count for directory '%s': %w", path, err)
	}

	totalSize += 4

	for i := 0; i < int(entryCount); i++ {
		// read byte to determine entry type
		entryType, err := r.ReadByte()
		if err != nil {
			return nil, 0, fmt.Errorf("failed to read entry type: %w", err)
		}

		nameLength, err := r.ReadByte()
		if err != nil {
			return nil, 0, fmt.Errorf("failed to read name length: %w", err)
		}

		name := make([]byte, nameLength)
		if _, err := io.ReadFull(r, name); err != nil {
			return nil, 0, fmt.Errorf("failed to read name: %w", err)
		}

		totalSize += 1 + 1 + int(nameLength)

		switch entryType {
		case 0x00: // Directory
			subEntries, size, err := parseDirectory(r, path+"/"+string(name), dataBlockOffset)
			if err != nil {
				return nil, 0, fmt.Errorf("failed to parse directory '%s': %w", path+"/"+string(name), err)
			}
			entries = append(entries, subEntries...)
			totalSize += size
		case 0x01: // File
			pakEntry, err := parseEntry(r)
			if err != nil {
				return nil, 0, fmt.Errorf("failed to parse entry for file '%s': %w", path+"/"+string(name), err)
			}
			pakEntry.Name = path + "/" + string(name)
			entries = append(entries, pakEntry)
			totalSize += 4 + 4 + 4 + 4 + 4 + 4
		}
	}

	return entries, totalSize, nil
}

func GetPakFileInfo(data []byte) ([]PakEntry, error) {
	r := bytes.NewReader(data)

	// Read FORM header
	if err := readString(r, "FORM"); err != nil {
		return nil, err
	}
	r.Seek(4, io.SeekCurrent) // Skip total file size

	// Read PAC1 header
	if err := readString(r, "PAC1"); err != nil {
		return nil, err
	}

	var dataBlockOffset int64 = -1

	// Find DATA and FILE chunks
	for {
		chunkIDBytes := make([]byte, 4)
		_, err := io.ReadFull(r, chunkIDBytes)
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("failed to read chunk ID: %w", err)
		}
		chunkID := string(chunkIDBytes)

		var chunkSize uint32
		if err := binary.Read(r, binary.BigEndian, &chunkSize); err != nil {
			return nil, fmt.Errorf("failed to read chunk size for %s: %w", chunkID, err)
		}

		switch chunkID {
		case "HEAD":
			r.Seek(int64(chunkSize), io.SeekCurrent)
		case "DATA":
			// The file offsets are relative to the start of the data within this block.
			// So we record the start position of the data block.
			dataBlockOffset, err = r.Seek(0, io.SeekCurrent)
			if err != nil {
				return nil, fmt.Errorf("could not get DATA block offset: %w", err)
			}
			r.Seek(int64(chunkSize), io.SeekCurrent)
		case "FILE":
			if dataBlockOffset == -1 {
				return nil, fmt.Errorf("FILE chunk found before DATA chunk")
			}

			// skip unknown data
			r.Seek(6, io.SeekCurrent)

			entries, err := parseEntries(r, dataBlockOffset)
			if err != nil {
				return nil, fmt.Errorf("failed to parse FILE entries: %w", err)
			}

			return entries, nil
		default:
			// Skip unknown chunks
			r.Seek(int64(chunkSize), io.SeekCurrent)
		}
	}

	return nil, fmt.Errorf("FILE chunk not found in archive")
}
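
Usage note (not part of this commit): a minimal sketch of calling GetPakFileInfo on a .pak archive that is already on disk. The file path is hypothetical; only the exported API above is assumed.

    package main

    import (
    	"fmt"
    	"os"

    	reforger "gitea.tbdevent.eu/ilbinek/reforger_utils"
    )

    func main() {
    	// Hypothetical path to a previously downloaded .pak archive.
    	data, err := os.ReadFile("downloads/example.pak")
    	if err != nil {
    		panic(err)
    	}

    	entries, err := reforger.GetPakFileInfo(data)
    	if err != nil {
    		panic(err)
    	}

    	// List every file recorded in the FILE chunk; offsets are relative
    	// to the start of the DATA block, as noted in GetPakFileInfo.
    	for _, e := range entries {
    		fmt.Printf("%s offset=%d size=%d original=%d compress=%d\n",
    			e.Name, e.Offset, e.Size, e.OriginalSize, e.CompressType)
    	}
    }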
structs.go · Normal file · +100
@@ -0,0 +1,100 @@
package reforger_utils

import "time"

type AssetsReply []struct {
	ID                         int         `json:"id"`
	Blocked                    bool        `json:"blocked"`
	DeletedAt                  interface{} `json:"deletedAt"`
	Private                    bool        `json:"private"`
	GameVersion                string      `json:"gameVersion"`
	PlatformCompatibility      string      `json:"platformCompatibility"`
	PlatformCompatibilityValue int         `json:"platformCompatibilityValue"`
	Accessible                 bool        `json:"accessible"`
	Version                    string      `json:"version"`
	TotalFileSize              int         `json:"totalFileSize"`
	Deleted                    bool        `json:"deleted"`
	AccessValue                int         `json:"accessValue"`
	Files                      []struct {
		Md5          string `json:"md5"`
		Name         string `json:"name"`
		Size         int    `json:"size"`
		ContentType  string `json:"contentType"`
		ManifestPath string `json:"manifestPath"`
	} `json:"files"`
	Asset struct {
		ID             string      `json:"id"`
		Meta           interface{} `json:"meta"`
		Name           string      `json:"name"`
		Type           string      `json:"type"`
		Owned          bool        `json:"owned"`
		Blocked        bool        `json:"blocked"`
		Private        bool        `json:"private"`
		Summary        string      `json:"summary"`
		Unlisted       bool        `json:"unlisted"`
		CreatedAt      time.Time   `json:"createdAt"`
		UpdatedAt      time.Time   `json:"updatedAt"`
		DependencyTree struct {
			Access                     string      `json:"access"`
			Blocked                    bool        `json:"blocked"`
			Private                    bool        `json:"private"`
			Unlisted                   bool        `json:"unlisted"`
			DeletedAt                  interface{} `json:"deletedAt"`
			AccessValue                int         `json:"accessValue"`
			GameVersion                string      `json:"gameVersion"`
			PlatformCompatibility      string      `json:"platformCompatibility"`
			PlatformCompatibilityValue int         `json:"platformCompatibilityValue"`
		} `json:"dependencyTree"`
		CurrentVersionID     int    `json:"currentVersionId"`
		CurrentVersionSize   int    `json:"currentVersionSize"`
		CurrentVersionNumber string `json:"currentVersionNumber"`
	} `json:"asset"`
	Dependencies []interface{} `json:"dependencies"`
}

type Manifest struct {
	Version   int    `json:"version"`
	Size      int    `json:"size"`
	Sha512    string `json:"sha512"`
	Remainder struct {
		Sha512  string `json:"sha512"`
		Size    int    `json:"size"`
		Offsets []int  `json:"offsets"`
	} `json:"remainder"`
	Fragments []struct {
		Sha512  string `json:"sha512"`
		Size    int    `json:"size"`
		Offsets []int  `json:"offsets"`
	} `json:"fragments"`
}

type RDBHeader struct {
	Magic      [4]byte // "FORM"
	Length     uint32
	Tag        [4]byte // "RDBC"
	Version    uint32
	EntryCount uint32
}

type RDBEntry struct {
	Path             string
	Hash             [16]byte
	CompressedSize   uint32
	UncompressedSize uint32
	Flags            uint32
	Offset           uint64
}

type Frag struct {
	Offset int
	Size   int
}

type PakEntry struct {
	Name         string
	Offset       uint32
	Size         uint32
	OriginalSize uint32
	CompressType uint32
	UnknownData  uint32
}
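
Usage note (not part of this commit): a small sketch of how the Manifest type maps onto the CDN manifest JSON consumed by GetManifest and Download. The JSON literal below is illustrative only; the field names are taken from the struct tags above.

    package main

    import (
    	"encoding/json"
    	"fmt"

    	reforger "gitea.tbdevent.eu/ilbinek/reforger_utils"
    )

    func main() {
    	// Illustrative manifest: one 24-byte fragment plus an 8-byte remainder.
    	raw := []byte(`{
    		"version": 1,
    		"size": 32,
    		"sha512": "abcdef",
    		"remainder": {"sha512": "aaaa", "size": 8, "offsets": [24]},
    		"fragments": [{"sha512": "bbbb", "size": 24, "offsets": [0]}]
    	}`)

    	var m reforger.Manifest
    	if err := json.Unmarshal(raw, &m); err != nil {
    		panic(err)
    	}

    	// Size is the total reassembled length; fragments and remainder
    	// together cover it, which is how Download sizes its output buffer.
    	fmt.Println("total:", m.Size, "fragments:", len(m.Fragments), "remainder:", m.Remainder.Size)
    }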
util.go · Normal file · +179
@@ -0,0 +1,179 @@
package reforger_utils

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"slices"
	"strings"
)

var Debug = false

func DoAssetsRequest(addonID, version string) AssetsReply {
	url := "https://api-ar-workshop.bistudio.com/workshop-api/api/v3.0/s2s/assets/download-list"
	body := `{"assets":{"` + addonID + `":"` + version + `"}}`
	req, err := http.NewRequest("POST", url, strings.NewReader(body))
	if err != nil {
		panic(err)
	}

	req.Header.Add("x-client-id", "$edb1b7862bba5cade1f6e06bfdeac2c")
	req.Header.Add("x-client-secret", "$8b415ea2aa11bd51f2f5b5a9dcb8476")
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("user-agent", "Arma Reforger/1.4.0.53 (Headless; Windows)")
	req.Header.Add("content-length", fmt.Sprintf("%d", len(body)))

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	if res.StatusCode != 200 {
		// print entire body
		body, _ := io.ReadAll(res.Body)
		fmt.Println(string(body))
		panic("bad status: " + res.Status)
	}

	// Process response
	decoder := json.NewDecoder(res.Body)
	var assetsReply AssetsReply
	if err := decoder.Decode(&assetsReply); err != nil {
		panic(err)
	}

	return assetsReply
}

func GetManifest(manifestUrl string) Manifest {
	url := "https://ar-gcp-cdn.bistudio.com/manifest/" + manifestUrl
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		panic(err)
	}

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	if res.StatusCode != 200 {
		panic("bad status: " + res.Status)
	}

	// Decode the JSON response
	var manifest Manifest
	if err := json.NewDecoder(res.Body).Decode(&manifest); err != nil {
		panic(err)
	}

	return manifest
}

func Download(manifest Manifest, downloadDir string, filen string) []byte {
	if Debug {
		// create the directory if it doesn't exist
		if err := os.MkdirAll(downloadDir + "/fragments/" + filen, os.ModePerm); err != nil {
			panic(err)
		}
	}

	// download remainder
	downloaded := 0
	remContent := make([]byte, 0)
	currentRem := 0
	if manifest.Remainder.Size > 0 {
		url := transformShaToURL(manifest.Remainder.Sha512, manifest.Remainder.Size)
		content := getContent(url)
		remContent = content
		downloaded += len(content)

		if Debug {
			// save remainder to file in the fragments/ directory, prefix with gap
			if err := os.WriteFile(filepath.Join(downloadDir, "fragments", filen, fmt.Sprintf("%d.%s.%d.bytes", manifest.Remainder.Offsets[0], manifest.Remainder.Sha512, manifest.Remainder.Size)), remContent, os.ModePerm); err != nil {
				panic(err)
			}
		}
	}

	trackFrag := make([]Frag, 0)

	// start downloading chunks
	ret := make([]byte, manifest.Size)
	for _, fragment := range manifest.Fragments {
		url := transformShaToURL(fragment.Sha512, fragment.Size)
		content := getContent(url)
		downloaded += len(content) * len(fragment.Offsets)
		for _, offset := range fragment.Offsets {
			trackFrag = append(trackFrag, Frag{Offset: offset, Size: len(content)})
			copy(ret[offset:offset+len(content)], content)
		}

		if Debug {
			// save fragment to file in the fragments/ directory
			if err := os.WriteFile(filepath.Join(downloadDir, "fragments", filen, fmt.Sprintf("%d.%s.%d.bytes", fragment.Offsets[0], fragment.Sha512, fragment.Size)), content, os.ModePerm); err != nil {
				panic(err)
			}
		}

		percent := float32(downloaded) / float32(manifest.Size) * 100.0
		fmt.Printf("\r\033[32mDownloaded\033[0m %d/%d (\033[36m%.2f%%\033[0m) of \033[33m%s\033[0m", downloaded, manifest.Size, percent, filen)
	}

	// sort trackFrag by Offset
	slices.SortFunc(trackFrag, func(a, b Frag) int {
		return a.Offset - b.Offset
	})

	// check for gaps in trackFrag and fill with remainder content
	curPos := 0
	for _, frag := range trackFrag {
		if frag.Offset > curPos {
			// gap detected
			gapSize := frag.Offset - curPos
			if currentRem+gapSize > len(remContent) {
				panic("not enough remainder content to fill gap")
			}
			copy(ret[curPos:curPos+gapSize], remContent[currentRem:currentRem+gapSize])
			remContent = remContent[gapSize:]
		}
		curPos = frag.Offset + frag.Size
	}

	// Append rest of remainder content
	copy(ret[curPos:], remContent)
	fmt.Println()

	return ret
}

func getContent(url string) []byte {
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		panic("bad status: " + resp.Status)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	return body
}

func transformShaToURL(sha512s string, size int) string {
	return "https://ar-gcp-cdn.bistudio.com/fragment/" + sha512s[0:4] + "/" + sha512s[4:8] + "/" + sha512s[8:] + "/" + fmt.Sprintf("%d", size)
}
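
Usage note (not part of this commit): an end-to-end sketch chaining the three exported helpers. The addon ID and version are placeholders, and it assumes Files[].ManifestPath is the path GetManifest expects; DoAssetsRequest resolves the file list, GetManifest fetches each file's manifest, and Download reassembles the bytes.

    package main

    import (
    	"os"
    	"path/filepath"

    	reforger "gitea.tbdevent.eu/ilbinek/reforger_utils"
    )

    func main() {
    	// Placeholder addon ID and version.
    	reply := reforger.DoAssetsRequest("59727DAE364DEADB", "1.0.0")

    	for _, asset := range reply {
    		for _, file := range asset.Files {
    			manifest := reforger.GetManifest(file.ManifestPath)
    			data := reforger.Download(manifest, "downloads", file.Name)

    			// Write the reassembled file under downloads/, matching .gitignore.
    			out := filepath.Join("downloads", file.Name)
    			if err := os.MkdirAll(filepath.Dir(out), os.ModePerm); err != nil {
    				panic(err)
    			}
    			if err := os.WriteFile(out, data, 0o644); err != nil {
    				panic(err)
    			}
    		}
    	}
    }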