7 Commits
v0.0.2 ... main

Author SHA1 Message Date
Sotirios Pupakis
d941282b74 test multi 2025-09-08 22:11:47 +02:00
Sotirios Pupakis
6d725b9f5f test multiple 2025-09-08 21:30:25 +02:00
Sotirios Pupakis
93b73d7bea fix 2025-09-08 21:23:20 +02:00
Sotirios Pupakis
110d8c073d ffs 2025-09-08 04:27:09 +02:00
Sotirios Pupakis
2fa2b48611 change 2025-09-08 04:20:15 +02:00
Sotirios Pupakis
8f0fcdca4a Added reference for data 2025-09-06 23:34:53 +02:00
Sotirios Pupakis
345bd44323 Added error hadnling 2025-09-06 23:01:41 +02:00
4 changed files with 178 additions and 33 deletions

94
cmd/main.go Normal file
View File

@@ -0,0 +1,94 @@
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"gitea.tbdevent.eu/ilbinek/reforger_utils"
)
// DEBUG toggles persistence of intermediate artifacts (raw API reply,
// per-file manifests, and extracted pak entries) under downloads/.
const DEBUG = false
// main downloads every asset file for a hard-coded addon/version pair from
// the Reforger workshop, reports each download, and lists the directory
// entries of every downloaded .pak file. When DEBUG is true it additionally
// writes the raw assets reply, each file's manifest, and the extracted pak
// entries under downloads/<addonID>/.
//
// All failures are treated as fatal via panic, which is acceptable for this
// one-shot command-line tool.
func main() {
	addonID := "61EB463A41FE5274"
	version := "0.1.166"

	// Ask the workshop API which files make up this addon version.
	assetsReply, err := reforger_utils.DoAssetsRequest(addonID, version)
	if err != nil {
		panic(err)
	}

	downloadDir := filepath.Join("downloads", addonID)
	if DEBUG {
		if err := os.MkdirAll(downloadDir, os.ModePerm); err != nil {
			panic(err)
		}
	}

	bytesReply, err := json.MarshalIndent(assetsReply, "", " ")
	if err != nil {
		panic(err)
	}
	if DEBUG {
		if err := os.WriteFile(filepath.Join(downloadDir, "assets.json"), bytesReply, os.ModePerm); err != nil {
			panic(err)
		}
	}

	for _, asset := range assetsReply {
		for _, file := range asset.Files {
			manifest, err := reforger_utils.GetManifest(file.ManifestPath)
			if err != nil {
				panic(err)
			}
			if DEBUG {
				// Save a pretty-printed copy of the manifest next to the download.
				var manifestBuf bytes.Buffer
				manifestEncoder := json.NewEncoder(&manifestBuf)
				manifestEncoder.SetIndent("", " ")
				if err := manifestEncoder.Encode(manifest); err != nil {
					panic(err)
				}
				if err := os.WriteFile(filepath.Join(downloadDir, file.Name+".manifest.json"), manifestBuf.Bytes(), os.ModePerm); err != nil {
					panic(err)
				}
			}

			downedFile, err := reforger_utils.Download(manifest, downloadDir, file.Name)
			if err != nil {
				panic(err)
			}
			fmt.Println("Downloaded", file.Name, len(downedFile), "bytes")

			// Only .pak archives carry an inspectable directory.
			// NOTE: the previous check sliced file.Name[len(file.Name)-4:],
			// which panics on names shorter than 4 bytes; HasSuffix is safe.
			if !strings.HasSuffix(file.Name, ".pak") {
				continue
			}

			pakInfo, err := reforger_utils.GetPakFileInfo(&downedFile)
			if err != nil {
				panic(err)
			}
			for _, entry := range pakInfo {
				fmt.Printf(" - %s (offset: %d, size: %d)\n", entry.Name, entry.Offset, entry.Size)
				if DEBUG {
					// Slice out the entry's bytes; assumes entry offsets/sizes
					// from GetPakFileInfo lie within the downloaded file —
					// an out-of-range entry would panic here.
					entryData := downedFile[entry.Offset : entry.Offset+entry.Size]
					// Reuse downloadDir instead of rebuilding the same path.
					path := filepath.Join(downloadDir, "extracted", entry.Name)
					if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
						panic(err)
					}
					if err := os.WriteFile(path, entryData, os.ModePerm); err != nil {
						panic(err)
					}
				}
			}
		}
	}
}

View File

@@ -146,8 +146,8 @@ func parseDirectory(r *bytes.Reader, path string, dataBlockOffset int64) ([]PakE
return entries, totalSize, nil
}
func GetPakFileInfo(data []byte) ([]PakEntry, error) {
r := bytes.NewReader(data)
func GetPakFileInfo(data *[]byte) ([]PakEntry, error) {
r := bytes.NewReader(*data)
// Read FORM header
if err := readString(r, "FORM"); err != nil {

View File

@@ -61,11 +61,13 @@ type Manifest struct {
Size int `json:"size"`
Offsets []int `json:"offsets"`
} `json:"remainder"`
Fragments []struct {
Sha512 string `json:"sha512"`
Size int `json:"size"`
Offsets []int `json:"offsets"`
} `json:"fragments"`
Fragments []Fragment `json:"fragments"`
}
type Fragment struct {
Sha512 string `json:"sha512"`
Size int `json:"size"`
Offsets []int `json:"offsets"`
}
type RDBHeader struct {

97
util.go
View File

@@ -9,9 +9,11 @@ import (
"path/filepath"
"slices"
"strings"
"sync"
)
var Debug = false
var DownThreads = 8
func DoAssetsRequest(addonID, version string) (AssetsReply, error) {
url := "https://api-ar-workshop.bistudio.com/workshop-api/api/v3.0/s2s/assets/download-list"
@@ -37,7 +39,7 @@ func DoAssetsRequest(addonID, version string) (AssetsReply, error) {
// print entire body
body, _ := io.ReadAll(res.Body)
fmt.Println(string(body))
panic("bad status: " + res.Status)
return AssetsReply{}, fmt.Errorf("bad status: %s", res.Status)
}
// Process response
@@ -80,7 +82,7 @@ func Download(manifest Manifest, downloadDir string, filen string) ([]byte, erro
if Debug {
// create the directory if it doesn't exist
if err := os.MkdirAll(downloadDir + "/fragments/" + filen, os.ModePerm); err != nil {
if err := os.MkdirAll(downloadDir+"/fragments/"+filen, os.ModePerm); err != nil {
return nil, err
}
}
@@ -89,7 +91,7 @@ func Download(manifest Manifest, downloadDir string, filen string) ([]byte, erro
downloaded := 0
remContent := make([]byte, 0)
currentRem := 0
if (manifest.Remainder.Size > 0) {
if manifest.Remainder.Size > 0 {
url := transformShaToURL(manifest.Remainder.Sha512, manifest.Remainder.Size)
content, err := getContent(url)
if err != nil {
@@ -109,31 +111,77 @@ func Download(manifest Manifest, downloadDir string, filen string) ([]byte, erro
trackFrag := make([]Frag, 0)
// start downloading chunks
//currentOffset := 0
ret := make([]byte, manifest.Size)
for _, fragment := range manifest.Fragments {
url := transformShaToURL(fragment.Sha512, fragment.Size)
content, err := getContent(url)
if err != nil {
return nil, err
}
downloaded += len(content) * len(fragment.Offsets)
for _, offset := range fragment.Offsets {
trackFrag = append(trackFrag, Frag{Offset: offset, Size: len(content)})
copy(ret[offset:offset+len(content)], content)
}
totalMB := float32(manifest.Size) / 1024.0 / 1024.0
if Debug {
// save fragment to file in the fragments/ directory
if err := os.WriteFile(filepath.Join(downloadDir, "fragments", filen, fmt.Sprintf("%d.%s.%d.bytes", fragment.Offsets[0], fragment.Sha512, fragment.Size)), content, os.ModePerm); err != nil {
return nil, err
var wg sync.WaitGroup
var mu sync.Mutex
fragmentsChan := make(chan Fragment, len(manifest.Fragments))
errChan := make(chan error, DownThreads)
// Start worker goroutines
for i := 0; i < DownThreads; i++ {
wg.Go(func() {
for fragment := range fragmentsChan {
url := transformShaToURL(fragment.Sha512, fragment.Size)
content, err := getContent(url)
if err != nil {
// Handle error, maybe by sending it to an error channel
// For now, we'll just skip and print.
fmt.Printf("Error downloading fragment %s: %v\n", fragment.Sha512, err)
errChan <- err
}
mu.Lock()
// Safely write to the shared slice and update progress
for _, offset := range fragment.Offsets {
copy(ret[offset:offset+len(content)], content)
}
downloaded += len(content) * len(fragment.Offsets)
// Track downloaded fragments
for _, offset := range fragment.Offsets {
trackFrag = append(trackFrag, Frag{
Offset: offset,
Size: len(content),
})
}
// Print progress
downedMB := float32(downloaded) / 1024.0 / 1024.0
percent := float32(downloaded) / float32(manifest.Size) * 100.0
fmt.Printf("\r\033[32mDownloaded\033[0m %.2fMB/%.2fMB (\033[36m%.2f%%\033[0m) of \033[33m%s\033[0m", downedMB, totalMB, percent, filen)
mu.Unlock()
// The Debug section can be safely moved here as well
if os.Getenv("DEBUG") == "true" { // Using env var for a cleaner check
mu.Lock() // Re-lock for file write
if err := os.WriteFile(filepath.Join(downloadDir, "fragments", filen, fmt.Sprintf("%d.%s.%d.bytes", fragment.Offsets[0], fragment.Sha512, fragment.Size)), content, os.ModePerm); err != nil {
fmt.Printf("Error saving file: %v\n", err)
}
mu.Unlock()
}
}
}
percent := float32(downloaded) / float32(manifest.Size) * 100.0
fmt.Printf("\r\033[32mDownloaded\033[0m %d/%d (\033[36m%.2f%%\033[0m) of \033[33m%s\033[0m", downloaded, manifest.Size, percent, filen)
})
}
// Send fragments to the channel
for _, fragment := range manifest.Fragments {
fragmentsChan <- fragment
}
close(fragmentsChan) // Close the channel to signal workers no more tasks are coming
// Wait for all workers to finish
wg.Wait()
// Check for errors
if len(errChan) > 0 {
return nil, <-errChan // Return the first error encountered
}
close(errChan)
// sort trackFrag by Offset
slices.SortFunc(trackFrag, func(a, b Frag) int {
return a.Offset - b.Offset
@@ -141,11 +189,12 @@ func Download(manifest Manifest, downloadDir string, filen string) ([]byte, erro
// check for gaps in trackFrag and fill with remainder content
curPos := 0
for _, frag := range trackFrag {
if frag.Offset > curPos {
// gap detected
gapSize := frag.Offset - curPos
if currentRem + gapSize > len(remContent) {
if currentRem+gapSize > len(remContent) {
panic("not enough remainder content to fill gap")
}
copy(ret[curPos:curPos+gapSize], remContent[currentRem:currentRem+gapSize])