diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..6e1bf15
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,662 @@
+# Created by https://www.toptal.com/developers/gitignore/api/visualstudio,rider,csharp
+# Edit at https://www.toptal.com/developers/gitignore?templates=visualstudio,rider,csharp
+
+### Csharp ###
+## Ignore Visual Studio temporary files, build results, and
+## files generated by popular Visual Studio add-ons.
+##
+## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore
+
+# User-specific files
+*.rsuser
+*.suo
+*.user
+*.userosscache
+*.sln.docstates
+
+# User-specific files (MonoDevelop/Xamarin Studio)
+*.userprefs
+
+# Mono auto generated files
+mono_crash.*
+
+# Build results
+[Dd]ebug/
+[Dd]ebugPublic/
+[Rr]elease/
+[Rr]eleases/
+x64/
+x86/
+[Ww][Ii][Nn]32/
+[Aa][Rr][Mm]/
+[Aa][Rr][Mm]64/
+bld/
+[Bb]in/
+[Oo]bj/
+[Ll]og/
+[Ll]ogs/
+
+# Visual Studio 2015/2017 cache/options directory
+.vs/
+# Uncomment if you have tasks that create the project's static files in wwwroot
+#wwwroot/
+
+# Visual Studio 2017 auto generated files
+Generated\ Files/
+
+# MSTest test Results
+[Tt]est[Rr]esult*/
+[Bb]uild[Ll]og.*
+
+# NUnit
+*.VisualState.xml
+TestResult.xml
+nunit-*.xml
+
+# Build Results of an ATL Project
+[Dd]ebugPS/
+[Rr]eleasePS/
+dlldata.c
+
+# Benchmark Results
+BenchmarkDotNet.Artifacts/
+
+# .NET Core
+project.lock.json
+project.fragment.lock.json
+artifacts/
+
+# ASP.NET Scaffolding
+ScaffoldingReadMe.txt
+
+# StyleCop
+StyleCopReport.xml
+
+# Files built by Visual Studio
+*_i.c
+*_p.c
+*_h.h
+*.ilk
+*.meta
+*.obj
+*.iobj
+*.pch
+*.pdb
+*.ipdb
+*.pgc
+*.pgd
+*.rsp
+*.sbr
+*.tlb
+*.tli
+*.tlh
+*.tmp
+*.tmp_proj
+*_wpftmp.csproj
+*.log
+*.tlog
+*.vspscc
+*.vssscc
+.builds
+*.pidb
+*.svclog
+*.scc
+
+# Chutzpah Test files
+_Chutzpah*
+
+# Visual C++ cache files
+ipch/
+*.aps
+*.ncb
+*.opendb
+*.opensdf
+*.sdf
+*.cachefile
+*.VC.db
+*.VC.VC.opendb
+
+# Visual Studio profiler
+*.psess
+*.vsp
+*.vspx
+*.sap
+
+# Visual Studio Trace Files
+*.e2e
+
+# TFS 2012 Local Workspace
+$tf/
+
+# Guidance Automation Toolkit
+*.gpState
+
+# ReSharper is a .NET coding add-in
+_ReSharper*/
+*.[Rr]e[Ss]harper
+*.DotSettings.user
+
+# TeamCity is a build add-in
+_TeamCity*
+
+# DotCover is a Code Coverage Tool
+*.dotCover
+
+# AxoCover is a Code Coverage Tool
+.axoCover/*
+!.axoCover/settings.json
+
+# Coverlet is a free, cross platform Code Coverage Tool
+coverage*.json
+coverage*.xml
+coverage*.info
+
+# Visual Studio code coverage results
+*.coverage
+*.coveragexml
+
+# NCrunch
+_NCrunch_*
+.*crunch*.local.xml
+nCrunchTemp_*
+
+# MightyMoose
+*.mm.*
+AutoTest.Net/
+
+# Web workbench (sass)
+.sass-cache/
+
+# Installshield output folder
+[Ee]xpress/
+
+# DocProject is a documentation generator add-in
+DocProject/buildhelp/
+DocProject/Help/*.HxT
+DocProject/Help/*.HxC
+DocProject/Help/*.hhc
+DocProject/Help/*.hhk
+DocProject/Help/*.hhp
+DocProject/Help/Html2
+DocProject/Help/html
+
+# Click-Once directory
+publish/
+
+# Publish Web Output
+*.[Pp]ublish.xml
+*.azurePubxml
+# Note: Comment the next line if you want to checkin your web deploy settings,
+# but database connection strings (with potential passwords) will be unencrypted
+*.pubxml
+*.publishproj
+
+# Microsoft Azure Web App publish settings. Comment the next line if you want to
+# checkin your Azure Web App publish settings, but sensitive information contained
+# in these scripts will be unencrypted
+PublishScripts/
+
+# NuGet Packages
+*.nupkg
+# NuGet Symbol Packages
+*.snupkg
+# The packages folder can be ignored because of Package Restore
+**/[Pp]ackages/*
+# except build/, which is used as an MSBuild target.
+!**/[Pp]ackages/build/
+# Uncomment if necessary however generally it will be regenerated when needed
+#!**/[Pp]ackages/repositories.config
+# NuGet v3's project.json files produces more ignorable files
+*.nuget.props
+*.nuget.targets
+
+# Microsoft Azure Build Output
+csx/
+*.build.csdef
+
+# Microsoft Azure Emulator
+ecf/
+rcf/
+
+# Windows Store app package directories and files
+AppPackages/
+BundleArtifacts/
+Package.StoreAssociation.xml
+_pkginfo.txt
+*.appx
+*.appxbundle
+*.appxupload
+
+# Visual Studio cache files
+# files ending in .cache can be ignored
+*.[Cc]ache
+# but keep track of directories ending in .cache
+!?*.[Cc]ache/
+
+# Others
+ClientBin/
+~$*
+*~
+*.dbmdl
+*.dbproj.schemaview
+*.jfm
+*.pfx
+*.publishsettings
+orleans.codegen.cs
+
+# Including strong name files can present a security risk
+# (https://github.com/github/gitignore/pull/2483#issue-259490424)
+#*.snk
+
+# Since there are multiple workflows, uncomment next line to ignore bower_components
+# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
+#bower_components/
+
+# RIA/Silverlight projects
+Generated_Code/
+
+# Backup & report files from converting an old project file
+# to a newer Visual Studio version. Backup files are not needed,
+# because we have git ;-)
+_UpgradeReport_Files/
+Backup*/
+UpgradeLog*.XML
+UpgradeLog*.htm
+ServiceFabricBackup/
+*.rptproj.bak
+
+# SQL Server files
+*.mdf
+*.ldf
+*.ndf
+
+# Business Intelligence projects
+*.rdl.data
+*.bim.layout
+*.bim_*.settings
+*.rptproj.rsuser
+*- [Bb]ackup.rdl
+*- [Bb]ackup ([0-9]).rdl
+*- [Bb]ackup ([0-9][0-9]).rdl
+
+# Microsoft Fakes
+FakesAssemblies/
+
+# GhostDoc plugin setting file
+*.GhostDoc.xml
+
+# Node.js Tools for Visual Studio
+.ntvs_analysis.dat
+node_modules/
+
+# Visual Studio 6 build log
+*.plg
+
+# Visual Studio 6 workspace options file
+*.opt
+
+# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
+*.vbw
+
+# Visual Studio 6 auto-generated project file (contains which files were open etc.)
+*.vbp
+
+# Visual Studio 6 workspace and project file (working project files containing files to include in project)
+*.dsw
+*.dsp
+
+# Visual Studio 6 technical files
+
+# Visual Studio LightSwitch build output
+**/*.HTMLClient/GeneratedArtifacts
+**/*.DesktopClient/GeneratedArtifacts
+**/*.DesktopClient/ModelManifest.xml
+**/*.Server/GeneratedArtifacts
+**/*.Server/ModelManifest.xml
+_Pvt_Extensions
+
+# Paket dependency manager
+.paket/paket.exe
+paket-files/
+
+# FAKE - F# Make
+.fake/
+
+# CodeRush personal settings
+.cr/personal
+
+# Python Tools for Visual Studio (PTVS)
+__pycache__/
+*.pyc
+
+# Cake - Uncomment if you are using it
+# tools/**
+# !tools/packages.config
+
+# Tabs Studio
+*.tss
+
+# Telerik's JustMock configuration file
+*.jmconfig
+
+# BizTalk build output
+*.btp.cs
+*.btm.cs
+*.odx.cs
+*.xsd.cs
+
+# OpenCover UI analysis results
+OpenCover/
+
+# Azure Stream Analytics local run output
+ASALocalRun/
+
+# MSBuild Binary and Structured Log
+*.binlog
+
+# NVidia Nsight GPU debugger configuration file
+*.nvuser
+
+# MFractors (Xamarin productivity tool) working folder
+.mfractor/
+
+# Local History for Visual Studio
+.localhistory/
+
+# Visual Studio History (VSHistory) files
+.vshistory/
+
+# BeatPulse healthcheck temp database
+healthchecksdb
+
+# Backup folder for Package Reference Convert tool in Visual Studio 2017
+MigrationBackup/
+
+# Ionide (cross platform F# VS Code tools) working folder
+.ionide/
+
+# Fody - auto-generated XML schema
+FodyWeavers.xsd
+
+# VS Code files for those working on multiple tools
+.vscode/*
+!.vscode/settings.json
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+*.code-workspace
+
+# Local History for Visual Studio Code
+.history/
+
+# Windows Installer files from build outputs
+*.cab
+*.msi
+*.msix
+*.msm
+*.msp
+
+# JetBrains Rider
+*.sln.iml
+
+### Rider ###
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# AWS User-specific
+.idea/**/aws.xml
+
+# Generated files
+.idea/**/contentModel.xml
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/artifacts
+# .idea/compiler.xml
+# .idea/jarRepositories.xml
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+# *.iml
+# *.ipr
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# SonarLint plugin
+.idea/sonarlint/
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+
+# Android studio 3.1+ serialized cache file
+.idea/caches/build_file_checksums.ser
+
+### VisualStudio ###
+
+# User-specific files
+
+# User-specific files (MonoDevelop/Xamarin Studio)
+
+# Mono auto generated files
+
+# Build results
+
+# Visual Studio 2015/2017 cache/options directory
+# Uncomment if you have tasks that create the project's static files in wwwroot
+
+# Visual Studio 2017 auto generated files
+
+# MSTest test Results
+
+# NUnit
+
+# Build Results of an ATL Project
+
+# Benchmark Results
+
+# .NET Core
+
+# ASP.NET Scaffolding
+
+# StyleCop
+
+# Files built by Visual Studio
+
+# Chutzpah Test files
+
+# Visual C++ cache files
+
+# Visual Studio profiler
+
+# Visual Studio Trace Files
+
+# TFS 2012 Local Workspace
+
+# Guidance Automation Toolkit
+
+# ReSharper is a .NET coding add-in
+
+# TeamCity is a build add-in
+
+# DotCover is a Code Coverage Tool
+
+# AxoCover is a Code Coverage Tool
+
+# Coverlet is a free, cross platform Code Coverage Tool
+
+# Visual Studio code coverage results
+
+# NCrunch
+
+# MightyMoose
+
+# Web workbench (sass)
+
+# Installshield output folder
+
+# DocProject is a documentation generator add-in
+
+# Click-Once directory
+
+# Publish Web Output
+# Note: Comment the next line if you want to checkin your web deploy settings,
+# but database connection strings (with potential passwords) will be unencrypted
+
+# Microsoft Azure Web App publish settings. Comment the next line if you want to
+# checkin your Azure Web App publish settings, but sensitive information contained
+# in these scripts will be unencrypted
+
+# NuGet Packages
+# NuGet Symbol Packages
+# The packages folder can be ignored because of Package Restore
+# except build/, which is used as an MSBuild target.
+# Uncomment if necessary however generally it will be regenerated when needed
+# NuGet v3's project.json files produces more ignorable files
+
+# Microsoft Azure Build Output
+
+# Microsoft Azure Emulator
+
+# Windows Store app package directories and files
+
+# Visual Studio cache files
+# files ending in .cache can be ignored
+# but keep track of directories ending in .cache
+
+# Others
+
+# Including strong name files can present a security risk
+# (https://github.com/github/gitignore/pull/2483#issue-259490424)
+
+# Since there are multiple workflows, uncomment next line to ignore bower_components
+# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
+
+# RIA/Silverlight projects
+
+# Backup & report files from converting an old project file
+# to a newer Visual Studio version. Backup files are not needed,
+# because we have git ;-)
+
+# SQL Server files
+
+# Business Intelligence projects
+
+# Microsoft Fakes
+
+# GhostDoc plugin setting file
+
+# Node.js Tools for Visual Studio
+
+# Visual Studio 6 build log
+
+# Visual Studio 6 workspace options file
+
+# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
+
+# Visual Studio 6 auto-generated project file (contains which files were open etc.)
+
+# Visual Studio 6 workspace and project file (working project files containing files to include in project)
+
+# Visual Studio 6 technical files
+
+# Visual Studio LightSwitch build output
+
+# Paket dependency manager
+
+# FAKE - F# Make
+
+# CodeRush personal settings
+
+# Python Tools for Visual Studio (PTVS)
+
+# Cake - Uncomment if you are using it
+# tools/**
+# !tools/packages.config
+
+# Tabs Studio
+
+# Telerik's JustMock configuration file
+
+# BizTalk build output
+
+# OpenCover UI analysis results
+
+# Azure Stream Analytics local run output
+
+# MSBuild Binary and Structured Log
+
+# NVidia Nsight GPU debugger configuration file
+
+# MFractors (Xamarin productivity tool) working folder
+
+# Local History for Visual Studio
+
+# Visual Studio History (VSHistory) files
+
+# BeatPulse healthcheck temp database
+
+# Backup folder for Package Reference Convert tool in Visual Studio 2017
+
+# Ionide (cross platform F# VS Code tools) working folder
+
+# Fody - auto-generated XML schema
+
+# VS Code files for those working on multiple tools
+
+# Local History for Visual Studio Code
+
+# Windows Installer files from build outputs
+
+# JetBrains Rider
+
+### VisualStudio Patch ###
+# Additional files built by Visual Studio
+
+# End of https://www.toptal.com/developers/gitignore/api/visualstudio,rider,csharp
diff --git a/.idea/.idea.Fossil Fighters Tool/.idea/.gitignore b/.idea/.idea.Fossil Fighters Tool/.idea/.gitignore
new file mode 100644
index 0000000..50079b0
--- /dev/null
+++ b/.idea/.idea.Fossil Fighters Tool/.idea/.gitignore
@@ -0,0 +1,13 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Rider ignored files
+/modules.xml
+/projectSettingsUpdater.xml
+/contentModel.xml
+/.idea.Fossil Fighters Tool.iml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/.idea.Fossil Fighters Tool/.idea/encodings.xml b/.idea/.idea.Fossil Fighters Tool/.idea/encodings.xml
new file mode 100644
index 0000000..df87cf9
--- /dev/null
+++ b/.idea/.idea.Fossil Fighters Tool/.idea/encodings.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/.idea/.idea.Fossil Fighters Tool/.idea/indexLayout.xml b/.idea/.idea.Fossil Fighters Tool/.idea/indexLayout.xml
new file mode 100644
index 0000000..7b08163
--- /dev/null
+++ b/.idea/.idea.Fossil Fighters Tool/.idea/indexLayout.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/.idea.Fossil Fighters Tool/.idea/vcs.xml b/.idea/.idea.Fossil Fighters Tool/.idea/vcs.xml
new file mode 100644
index 0000000..94a25f7
--- /dev/null
+++ b/.idea/.idea.Fossil Fighters Tool/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/CSharp/DSDecmp.sln b/CSharp/DSDecmp.sln
new file mode 100644
index 0000000..72de3cc
--- /dev/null
+++ b/CSharp/DSDecmp.sln
@@ -0,0 +1,66 @@
+
+Microsoft Visual Studio Solution File, Format Version 11.00
+# Visual Studio 2010
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DSDecmp", "DSDecmp\DSDecmp.csproj", "{E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "GoldenSunDD", "GoldenSunDD\GoldenSunDD.csproj", "{8CE72663-0036-4A94-BD70-99AFE7CEEC0C}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LuminousArc", "LuminousArc\LuminousArc.csproj", "{4BD8DF5C-E971-45D1-B170-340D22DDB351}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Tester", "Tester\Tester.csproj", "{A4FABF4B-59F2-4D4B-9012-FF177980EAB7}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Debug|Mixed Platforms = Debug|Mixed Platforms
+ Debug|x86 = Debug|x86
+ Release|Any CPU = Release|Any CPU
+ Release|Mixed Platforms = Release|Mixed Platforms
+ Release|x86 = Release|x86
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Release|Any CPU.Build.0 = Release|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Release|Mixed Platforms.Build.0 = Release|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Release|x86.ActiveCfg = Release|Any CPU
+ {8CE72663-0036-4A94-BD70-99AFE7CEEC0C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {8CE72663-0036-4A94-BD70-99AFE7CEEC0C}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {8CE72663-0036-4A94-BD70-99AFE7CEEC0C}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU
+ {8CE72663-0036-4A94-BD70-99AFE7CEEC0C}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU
+ {8CE72663-0036-4A94-BD70-99AFE7CEEC0C}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {8CE72663-0036-4A94-BD70-99AFE7CEEC0C}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {8CE72663-0036-4A94-BD70-99AFE7CEEC0C}.Release|Any CPU.Build.0 = Release|Any CPU
+ {8CE72663-0036-4A94-BD70-99AFE7CEEC0C}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU
+ {8CE72663-0036-4A94-BD70-99AFE7CEEC0C}.Release|Mixed Platforms.Build.0 = Release|Any CPU
+ {8CE72663-0036-4A94-BD70-99AFE7CEEC0C}.Release|x86.ActiveCfg = Release|Any CPU
+ {4BD8DF5C-E971-45D1-B170-340D22DDB351}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {4BD8DF5C-E971-45D1-B170-340D22DDB351}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {4BD8DF5C-E971-45D1-B170-340D22DDB351}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU
+ {4BD8DF5C-E971-45D1-B170-340D22DDB351}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU
+ {4BD8DF5C-E971-45D1-B170-340D22DDB351}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {4BD8DF5C-E971-45D1-B170-340D22DDB351}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {4BD8DF5C-E971-45D1-B170-340D22DDB351}.Release|Any CPU.Build.0 = Release|Any CPU
+ {4BD8DF5C-E971-45D1-B170-340D22DDB351}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU
+ {4BD8DF5C-E971-45D1-B170-340D22DDB351}.Release|Mixed Platforms.Build.0 = Release|Any CPU
+ {4BD8DF5C-E971-45D1-B170-340D22DDB351}.Release|x86.ActiveCfg = Release|Any CPU
+ {A4FABF4B-59F2-4D4B-9012-FF177980EAB7}.Debug|Any CPU.ActiveCfg = Debug|x86
+ {A4FABF4B-59F2-4D4B-9012-FF177980EAB7}.Debug|Mixed Platforms.ActiveCfg = Debug|x86
+ {A4FABF4B-59F2-4D4B-9012-FF177980EAB7}.Debug|Mixed Platforms.Build.0 = Debug|x86
+ {A4FABF4B-59F2-4D4B-9012-FF177980EAB7}.Debug|x86.ActiveCfg = Debug|x86
+ {A4FABF4B-59F2-4D4B-9012-FF177980EAB7}.Debug|x86.Build.0 = Debug|x86
+ {A4FABF4B-59F2-4D4B-9012-FF177980EAB7}.Release|Any CPU.ActiveCfg = Release|x86
+ {A4FABF4B-59F2-4D4B-9012-FF177980EAB7}.Release|Mixed Platforms.ActiveCfg = Release|x86
+ {A4FABF4B-59F2-4D4B-9012-FF177980EAB7}.Release|Mixed Platforms.Build.0 = Release|x86
+ {A4FABF4B-59F2-4D4B-9012-FF177980EAB7}.Release|x86.ActiveCfg = Release|x86
+ {A4FABF4B-59F2-4D4B-9012-FF177980EAB7}.Release|x86.Build.0 = Release|x86
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/CSharp/DSDecmp/DSDecmp.csproj b/CSharp/DSDecmp/DSDecmp.csproj
new file mode 100644
index 0000000..e4e73cc
--- /dev/null
+++ b/CSharp/DSDecmp/DSDecmp.csproj
@@ -0,0 +1,77 @@
+
+
+
+ Debug
+ AnyCPU
+ 9.0.21022
+ 2.0
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}
+ Exe
+ Properties
+ DSDecmp
+ DSDecmp
+
+
+
+
+ 3.5
+ v2.0
+
+
+
+ true
+ full
+ false
+ bin\Debug\
+ DEBUG;TRACE
+ prompt
+ 4
+ true
+
+
+ pdbonly
+ true
+ bin\Release\
+ TRACE
+ prompt
+ 4
+ true
+ bin\Release\DSDecmp.XML
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/CSharp/DSDecmp/Exceptions/InputTooLargeException.cs b/CSharp/DSDecmp/Exceptions/InputTooLargeException.cs
new file mode 100644
index 0000000..4c3c94f
--- /dev/null
+++ b/CSharp/DSDecmp/Exceptions/InputTooLargeException.cs
@@ -0,0 +1,20 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace DSDecmp
+{
+ ///
+ /// An exception indicating that the file cannot be compressed, because the decompressed size
+ /// cannot be represented in the current compression format.
+ ///
+ public class InputTooLargeException : Exception
+ {
+ ///
+ /// Creates a new exception that indicates that the input is too big to be compressed.
+ ///
+ public InputTooLargeException()
+ : base("The compression ratio is not high enough to fit the input "
+ + "in a single compressed file.") { }
+ }
+}
diff --git a/CSharp/DSDecmp/Exceptions/NotEnoughDataException.cs b/CSharp/DSDecmp/Exceptions/NotEnoughDataException.cs
new file mode 100644
index 0000000..88bd32c
--- /dev/null
+++ b/CSharp/DSDecmp/Exceptions/NotEnoughDataException.cs
@@ -0,0 +1,39 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+
+namespace DSDecmp
+{
+ ///
+ /// An exception that is thrown by the decompression functions when there
+ /// is not enough data available in order to properly decompress the input.
+ ///
+ public class NotEnoughDataException : IOException
+ {
+ private long currentOutSize;
+ private long totalOutSize;
+ ///
+ /// Gets the actual number of written bytes.
+ ///
+ public long WrittenLength { get { return this.currentOutSize; } }
+ ///
+ /// Gets the number of bytes that was supposed to be written.
+ ///
+ public long DesiredLength { get { return this.totalOutSize; } }
+
+ ///
+ /// Creates a new NotEnoughDataException.
+ ///
+ /// The actual number of written bytes.
+ /// The desired number of written bytes.
+ public NotEnoughDataException(long currentOutSize, long totalOutSize)
+ : base("Not enough data availble; 0x" + currentOutSize.ToString("X")
+ + " of " + (totalOutSize < 0 ? "???" : ("0x" + totalOutSize.ToString("X")))
+ + " bytes written.")
+ {
+ this.currentOutSize = currentOutSize;
+ this.totalOutSize = totalOutSize;
+ }
+ }
+}
diff --git a/CSharp/DSDecmp/Exceptions/StreamTooShortException.cs b/CSharp/DSDecmp/Exceptions/StreamTooShortException.cs
new file mode 100644
index 0000000..8164f35
--- /dev/null
+++ b/CSharp/DSDecmp/Exceptions/StreamTooShortException.cs
@@ -0,0 +1,22 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+
+namespace DSDecmp
+{
+ ///
+ /// An exception thrown by the compression or decompression function, indicating that the
+ /// given input length was too large for the given input stream.
+ ///
+ public class StreamTooShortException : EndOfStreamException
+ {
+ ///
+ /// Creates a new exception that indicates that the stream was shorter than the given input length.
+ ///
+ public StreamTooShortException()
+ : base("The end of the stream was reached "
+ + "before the given amout of data was read.")
+ { }
+ }
+}
diff --git a/CSharp/DSDecmp/Exceptions/TooMuchInputException.cs b/CSharp/DSDecmp/Exceptions/TooMuchInputException.cs
new file mode 100644
index 0000000..d523b5d
--- /dev/null
+++ b/CSharp/DSDecmp/Exceptions/TooMuchInputException.cs
@@ -0,0 +1,32 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace DSDecmp
+{
+ ///
+    /// An exception indicating that the input has more data than required in order
+ /// to decompress it. This may indicate that more sub-files are present in the file.
+ ///
+ public class TooMuchInputException : Exception
+ {
+ ///
+        /// Gets the number of bytes read by the decompressor to decompress the stream.
+ ///
+ public long ReadBytes { get; private set; }
+
+ ///
+ /// Creates a new exception indicating that the input has more data than necessary for
+        /// decompressing the stream. It may indicate that other data is present after the compressed
+ /// stream.
+ ///
+ /// The number of bytes read by the decompressor.
+ /// The indicated length of the input stream.
+ public TooMuchInputException(long readBytes, long totLength)
+ : base("The input contains more data than necessary. Only used 0x"
+ + readBytes.ToString("X") + " of 0x" + totLength.ToString("X") + " bytes")
+ {
+ this.ReadBytes = readBytes;
+ }
+ }
+}
diff --git a/CSharp/DSDecmp/Formats/CompositeFormat.cs b/CSharp/DSDecmp/Formats/CompositeFormat.cs
new file mode 100644
index 0000000..4f7a394
--- /dev/null
+++ b/CSharp/DSDecmp/Formats/CompositeFormat.cs
@@ -0,0 +1,194 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+
+namespace DSDecmp.Formats
+{
+ ///
+ /// A format that is composed of multiple formats.
+ /// When compressing, the input is compressed using the best contained format.
+ /// When decompressing, all contained formats will try to decompress the file, until one succeeds.
+ ///
+ public abstract class CompositeFormat : CompressionFormat
+ {
+ ///
+        /// The actual list of formats this format is composed of.
+ ///
+ private List formats;
+
+
+ #region Constructors
+
+ ///
+ /// Creates a new composite format based on the given sequence of formats.
+ ///
+ protected CompositeFormat(IEnumerable formats)
+ {
+ this.formats = new List(formats);
+ }
+ ///
+ /// Creates a new composite format based on the given formats.
+ ///
+ protected CompositeFormat(params CompressionFormat[] formats)
+ {
+ this.formats = new List(formats);
+ }
+
+ #endregion
+
+
+ #region Method: Supports
+ ///
+ /// Checks if any of the contained formats supports the given input.
+ ///
+ public override bool Supports(System.IO.Stream stream, long inLength)
+ {
+ foreach (CompressionFormat fmt in this.formats)
+ {
+ if (fmt.Supports(stream, inLength))
+ return true;
+ }
+ return false;
+ }
+ #endregion
+
+ #region Method: Decompress
+ ///
+ /// Attempts to decompress the given input by letting all contained formats
+ /// try to decompress the input.
+ ///
+ public override long Decompress(System.IO.Stream instream, long inLength, System.IO.Stream outstream)
+ {
+ byte[] inputData = new byte[instream.Length];
+ instream.Read(inputData, 0, inputData.Length);
+
+ foreach (CompressionFormat format in this.formats)
+ {
+ if (!format.SupportsDecompression)
+ continue;
+ using (MemoryStream input = new MemoryStream(inputData))
+ {
+ if (!format.Supports(input, inputData.Length))
+ continue;
+ MemoryStream output = new MemoryStream();
+ try
+ {
+ long decLength = format.Decompress(input, inputData.Length, output);
+ if (decLength > 0)
+ {
+ output.WriteTo(outstream);
+ return decLength;
+ }
+ }
+ catch (Exception) { continue; }
+ }
+ }
+
+ throw new InvalidDataException("Input cannot be decompressed using the " + this.ShortFormatString + " formats.");
+ }
+ #endregion
+
+ #region Method: Compress & Field: LastUsedCompressFormatString
+ ///
+ /// Gets the ShortFormatString of the last CompressionFormat that was used to compress input.
+ ///
+ public string LastUsedCompressFormatString { get; private set; }
+ ///
+ /// Compresses the given input using the contained format that yields the best results in terms of
+ /// size reduction.
+ ///
+ public override int Compress(System.IO.Stream instream, long inLength, System.IO.Stream outstream)
+ {
+ // only read the input data once from the file.
+ byte[] inputData = new byte[instream.Length];
+ instream.Read(inputData, 0, inputData.Length);
+
+ MemoryStream bestOutput = null;
+ string bestFormatString = "";
+ int minCompSize = int.MaxValue;
+ foreach (CompressionFormat format in formats)
+ {
+ if (!format.SupportsCompression)
+ continue;
+
+ #region compress the file in each format, and save the best one
+
+ MemoryStream currentOutput = new MemoryStream();
+ int currentOutSize;
+ try
+ {
+ using (MemoryStream input = new MemoryStream(inputData))
+ {
+ currentOutSize = format.Compress(input, input.Length, currentOutput);
+ }
+ }
+ catch (InputTooLargeException i)
+ {
+ Console.WriteLine(i.Message);
+ bestFormatString = format.ShortFormatString;
+ return -1;
+ }
+ catch (Exception)
+ {
+ continue;
+ }
+ if (currentOutSize < minCompSize)
+ {
+ bestOutput = currentOutput;
+ minCompSize = currentOutSize;
+ bestFormatString = format.ShortFormatString;
+ }
+
+ #endregion
+ }
+
+ if (bestOutput == null)
+ return -1;
+ bestOutput.WriteTo(outstream);
+ this.LastUsedCompressFormatString = bestFormatString;
+ return minCompSize;
+ }
+ #endregion
+
+ #region Method: ParseCompressionOptions(args)
+ ///
+ /// Handles the compression options for each of the contained compression formats.
+ ///
+ public override int ParseCompressionOptions(string[] args)
+ {
+ // try each option on each of the formats.
+ // each pass over the formats lets them try to consume the options.
+ // if one or more formats consume at least one option, the maximum number
+ // of consumed options is treated as 'handled'; they are ignored in the
+ // next pass. This continues until none of the formats consume the next
+ // value in the options.
+
+ int totalOptionCount = 0;
+ bool usedOption = true;
+ while (usedOption)
+ {
+ usedOption = false;
+ if (args.Length <= totalOptionCount)
+ break;
+ int maxOptionCount = 0;
+ string[] subArray = new string[args.Length - totalOptionCount];
+ Array.Copy(args, totalOptionCount, subArray, 0, subArray.Length);
+ foreach (CompressionFormat format in this.formats)
+ {
+ int optCount = format.ParseCompressionOptions(subArray);
+ maxOptionCount = Math.Max(optCount, maxOptionCount);
+ }
+
+ if (maxOptionCount > 0)
+ {
+ totalOptionCount += maxOptionCount;
+ usedOption = true;
+ }
+ }
+ return totalOptionCount;
+ }
+ #endregion
+
+ }
+}
diff --git a/CSharp/DSDecmp/Formats/CompressionFormat.cs b/CSharp/DSDecmp/Formats/CompressionFormat.cs
new file mode 100644
index 0000000..7708e80
--- /dev/null
+++ b/CSharp/DSDecmp/Formats/CompressionFormat.cs
@@ -0,0 +1,142 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+
+namespace DSDecmp
+{
+ ///
+ /// Base class for all compression formats.
+ ///
+ public abstract class CompressionFormat
+ {
+ ///
+ /// Checks if the decompressor for this format supports the given file. Assumes the
+ /// file exists. Returns false when it is certain that the given file is not supported.
+ /// False positives may occur, as this method should not do any decompression, and
+ /// may mis-interpret a similar file format as compressed.
+ ///
+ /// The name of the file to check.
+ /// False if the file can certainly not be decompressed using this decompressor.
+ /// True if the file may potentially be decompressed using this decompressor.
+ public virtual bool Supports(string file)
+ {
+ // open the file, and delegate to the decompressor-specific code.
+ using (FileStream fstr = new FileStream(file, FileMode.Open))
+ {
+ return this.Supports(fstr, fstr.Length);
+ }
+ }
+
+ ///
+ /// Checks if the decompressor for this format supports the data from the given stream.
+ /// Returns false when it is certain that the given data is not supported.
+ /// False positives may occur, as this method should not do any decompression, and may
+ /// mis-interpret a similar data format as compressed.
+ ///
+ /// The stream that may or may not contain compressed data. The
+ /// position of this stream may change during this call, but will be returned to its
+ /// original position when the method returns.
+ /// The length of the input stream.
+ /// False if the data can certainly not be decompressed using this decompressor.
+ /// True if the data may potentially be decompressed using this decompressor.
+ public abstract bool Supports(Stream stream, long inLength);
+
+ ///
+ /// Decompresses the given file, writing the decompressed data to the given output file.
+ /// The output file will be overwritten if it already exists.
+ /// Assumes <code>Supports(infile)</code> returns true.
+ ///
+ /// The file to decompress.
+ /// The target location of the decompressed file.
+ public virtual void Decompress(string infile, string outfile)
+ {
+ // make sure the output directory exists
+ string outDirectory = Path.GetDirectoryName(outfile);
+ if (!Directory.Exists(outDirectory))
+ Directory.CreateDirectory(outDirectory);
+ // open the two given files, and delegate to the format-specific code.
+ using (FileStream inStream = new FileStream(infile, FileMode.Open),
+ outStream = new FileStream(outfile, FileMode.Create))
+ {
+ this.Decompress(inStream, inStream.Length, outStream);
+ }
+ }
+
+ ///
+ /// Decompresses the given stream, writing the decompressed data to the given output stream.
+ /// Assumes <code>Supports(instream)</code> returns true.
+ /// After this call, the input stream will be positioned at the end of the compressed stream,
+ /// or at the initial position + <code>inLength</code>, whichever comes first.
+ ///
+ /// The stream to decompress. At the end of this method, the position
+ /// of this stream is directly after the compressed data.
+ /// The length of the input data. Not necessarily all of the
+ /// input data may be read (if there is padding, for example), however never more than
+ /// this number of bytes is read from the input stream.
+ /// The stream to write the decompressed data to.
+ /// The length of the output data.
+ /// When the given length of the input data
+ /// is not enough to properly decompress the input.
+ public abstract long Decompress(Stream instream, long inLength, Stream outstream);
+
+ ///
+ /// Compresses the given input file, and writes the compressed data to the given
+ /// output file.
+ ///
+ /// The file to compress.
+ /// The file to write the compressed data to.
+ /// The size of the compressed file. If -1, the file could not be compressed.
+ public int Compress(string infile, string outfile)
+ {
+ // make sure the output directory exists
+ string outDirectory = Path.GetDirectoryName(outfile);
+ if (!Directory.Exists(outDirectory))
+ Directory.CreateDirectory(outDirectory);
+ // open the proper Streams, and delegate to the format-specific code.
+ using (FileStream inStream = File.Open(infile, FileMode.Open),
+ outStream = File.Create(outfile))
+ {
+ return this.Compress(inStream, inStream.Length, outStream);
+ }
+ }
+
+ ///
+ /// Compresses the next <code>inLength</code> bytes from the input stream,
+ /// and writes the compressed data to the given output stream.
+ ///
+ /// The stream to read plaintext data from.
+ /// The length of the plaintext data.
+ /// The stream to write the compressed data to.
+ /// The size of the compressed stream. If -1, the file could not be compressed.
+ public abstract int Compress(Stream instream, long inLength, Stream outstream);
+
+ ///
+ /// Gets a short string identifying this compression format.
+ ///
+ public abstract string ShortFormatString { get; }
+ ///
+ /// Gets a short description of this compression format (used in the program usage).
+ ///
+ public abstract string Description { get; }
+
+ ///
+ /// Gets if this format supports compressing a file.
+ ///
+ public abstract bool SupportsCompression { get; }
+ ///
+ /// Gets if this format supports decompressing a file.
+ ///
+ public virtual bool SupportsDecompression { get { return true; } }
+ ///
+ /// Gets the value that must be given on the command line in order to compress using this format.
+ ///
+ public abstract string CompressionFlag { get; }
+ ///
+ /// Parses any input specific for this format. Does nothing by default.
+ ///
+ /// Any arguments that may be used by the format.
+ /// The number of input arguments consumed by this format.
+ public virtual int ParseCompressionOptions(string[] args) { return 0; }
+ }
+}
diff --git a/CSharp/DSDecmp/Formats/LZOvl.cs b/CSharp/DSDecmp/Formats/LZOvl.cs
new file mode 100644
index 0000000..ae0bda0
--- /dev/null
+++ b/CSharp/DSDecmp/Formats/LZOvl.cs
@@ -0,0 +1,732 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+
+namespace DSDecmp.Formats
+{
+ ///
+ /// The LZ-Overlay compression format. Compresses part of the file from end to start.
+ /// Is used for the 'overlay' files in NDS games, as well as arm9.bin.
+ /// Note that the last 12 bytes should not be included in the 'inLength' argument when
+ /// decompressing arm9.bin. This is done automatically if a file is given instead of a stream.
+ ///
+ public sealed class LZOvl : CompressionFormat
+ {
+ ///
+ /// Gets a short string identifying this compression format.
+ ///
+ public override string ShortFormatString
+ {
+ get { return "LZ-Ovl"; }
+ }
+
+ ///
+ /// Gets a short description of this compression format (used in the program usage).
+ ///
+ public override string Description
+ {
+ get { return "Reverse LZ format, mainly used in 'overlay' files of NDS games."; }
+ }
+
+ ///
+ /// Gets the value that must be given on the command line in order to compress using this format.
+ ///
+ public override string CompressionFlag
+ {
+ get { return "lzovl"; }
+ }
+
+ ///
+ /// Gets if this format supports compressing a file.
+ ///
+ public override bool SupportsCompression
+ {
+ get { return true; }
+ }
+
+ private static bool lookAhead = false;
+ ///
+ /// Sets the flag that determines if 'look-ahead'/DP should be used when compressing
+ /// with the LZ-Ovl format. The default is false, which is what is used in the original
+ /// implementation.
+ ///
+ public static bool LookAhead
+ {
+ set { lookAhead = value; }
+ }
+
+ ///
+ /// Checks if the given arguments have the '-opt' option, which makes this format
+ /// compress using (near-)optimal compression instead of the original compression algorithm.
+ ///
+ public override int ParseCompressionOptions(string[] args)
+ {
+ if (args.Length > 0)
+ if (args[0] == "-opt")
+ {
+ LookAhead = true;
+ return 1;
+ }
+ return 0;
+ }
+
+ #region Method: Supports(string file)
+ ///
+ /// Checks if this format supports decompressing the given file.
+ ///
+ public override bool Supports(string file)
+ {
+ using (FileStream fstr = File.OpenRead(file))
+ {
+ long fLength = fstr.Length;
+ // arm9.bin is special in the sense that the last 12 bytes should/can be ignored.
+ if (Path.GetFileName(file) == "arm9.bin")
+ fLength -= 0xC;
+ return this.Supports(fstr, fLength);
+ }
+ }
+ #endregion
+
+ #region Method: Supports(Stream, long)
+ ///
+ /// Checks if this format supports decompressing the given input.
+ ///
+ public override bool Supports(System.IO.Stream stream, long inLength)
+ {
+ // assume the 'inLength' does not include the 12 bytes at the end of arm9.bin
+
+ // only allow integer-sized files
+ if (inLength > 0xFFFFFFFFL)
+ return false;
+ // the header is 4 bytes minimum
+ if (inLength < 4)
+ return false;
+ long streamStart = stream.Position;
+ byte[] header = new byte[Math.Min(inLength, 0x20)];
+ stream.Position += inLength - header.Length;
+ stream.Read(header, 0, header.Length);
+ // reset the stream
+ stream.Position = streamStart;
+
+ uint extraSize = IOUtils.ToNDSu32(header, header.Length - 4);
+ if (extraSize == 0)
+ return false; // do not decompress whenever the last 4 bytes are 0; too many files have that.
+ // if the extrasize is nonzero, the minimum header length is 8 bytes
+ if (header.Length < 8)
+ return false;
+ byte headerLen = header[header.Length - 5];
+ if (inLength < headerLen)
+ return false;
+
+ // the compressed length should fit in the input file
+ int compressedLen = header[header.Length - 6] << 16
+ | header[header.Length - 7] << 8
+ | header[header.Length - 8];
+ if (compressedLen >= inLength - headerLen && compressedLen != inLength)
+ return false;
+
+ // verify that the rest of the header is filled with 0xFF
+ for (int i = header.Length - 9; i >= header.Length - headerLen; i--)
+ if (header[i] != 0xFF)
+ return false;
+ return true;
+ }
+ #endregion
+
+ #region Method: Decompress(string, string)
+ ///
+ /// Decompresses the given input file to the given output file using the LZ-Overlay compression format.
+ ///
+ public override void Decompress(string infile, string outfile)
+ {
+ // make sure the output directory exists
+ string outDirectory = Path.GetDirectoryName(outfile);
+ if (!Directory.Exists(outDirectory))
+ Directory.CreateDirectory(outDirectory);
+ // open the two given files, and delegate to the format-specific code.
+ using (FileStream inStream = new FileStream(infile, FileMode.Open),
+ outStream = new FileStream(outfile, FileMode.Create))
+ {
+ long fLength = inStream.Length;
+ // arm9.bin needs special attention
+ if (Path.GetFileName(infile) == "arm9.bin")
+ fLength -= 0xC;
+ this.Decompress(inStream, fLength, outStream);
+ }
+ }
+ #endregion
+
+ #region Decompression method
+ ///
+ /// Decompresses the given input using the LZ-Overlay compression scheme.
+ ///
+ public override long Decompress(System.IO.Stream instream, long inLength, System.IO.Stream outstream)
+ {
+ #region Format description
+ // Overlay LZ compression is basically just LZ-0x10 compression.
+ // however the order of reading is reversed: the compression starts at the end of the file.
+ // Assuming we start reading at the end towards the beginning, the format is:
+ /*
+ * u32 extraSize; // decompressed data size = file length (including header) + this value
+ * u8 headerSize;
+ * u24 compressedLength; // can be less than file size (w/o header). If so, the rest of the file is uncompressed.
+ * // may also be the file size
+ * u8[headerSize-8] padding; // 0xFF-s
+ *
+ * 0x10-like-compressed data follows (without the usual 4-byte header).
+ * The only difference is that 2 should be added to the DISP value in compressed blocks
+ * to get the proper value.
+ * the u32 and u24 are read most significant byte first.
+ * if extraSize is 0, there is no headerSize, decompressedLength or padding.
+ * the data starts immediately, and is uncompressed.
+ *
+ * arm9.bin has 3 extra u32 values at the 'start' (ie: end of the file),
+ * which may be ignored. (and are ignored here) These 12 bytes also should not
+ * be included in the computation of the output size.
+ */
+ #endregion
+
+ #region First read the last 4 bytes of the stream (the 'extraSize')
+
+ // first go to the end of the stream, since we're reading from back to front
+ // read the last 4 bytes, the 'extraSize'
+ instream.Position += inLength - 4;
+
+ byte[] buffer = new byte[4];
+ try
+ {
+ instream.Read(buffer, 0, 4);
+ }
+ catch (System.IO.EndOfStreamException)
+ {
+ // since we're immediately checking the end of the stream,
+ // this is the only location where we have to check for an EOS to occur.
+ throw new StreamTooShortException();
+ }
+ uint extraSize = IOUtils.ToNDSu32(buffer, 0);
+
+ #endregion
+
+ // if the extra size is 0, there is no compressed part, and the header ends there.
+ if (extraSize == 0)
+ {
+ #region just copy the input to the output
+
+ // first go back to the start of the file. the current location is after the 'extraSize',
+ // and thus at the end of the file.
+ instream.Position -= inLength;
+ // no buffering -> slow
+ buffer = new byte[inLength - 4];
+ instream.Read(buffer, 0, (int)(inLength - 4));
+ outstream.Write(buffer, 0, (int)(inLength - 4));
+
+ // make sure the input is positioned at the end of the file
+ instream.Position += 4;
+
+ return inLength - 4;
+
+ #endregion
+ }
+ else
+ {
+ // get the size of the compression header first.
+ instream.Position -= 5;
+ int headerSize = instream.ReadByte();
+
+ // then the compressed data size.
+ instream.Position -= 4;
+ instream.Read(buffer, 0, 3);
+ int compressedSize = buffer[0] | (buffer[1] << 8) | (buffer[2] << 16);
+
+ // the compressed size sometimes is the file size.
+ if (compressedSize + headerSize >= inLength)
+ compressedSize = (int)(inLength - headerSize);
+
+ #region copy the non-compressed data
+
+ // copy the non-compressed data first.
+ buffer = new byte[inLength - headerSize - compressedSize];
+ instream.Position -= (inLength - 5);
+ instream.Read(buffer, 0, buffer.Length);
+ outstream.Write(buffer, 0, buffer.Length);
+
+ #endregion
+
+ // buffer the compressed data, such that we don't need to keep
+ // moving the input stream position back and forth
+ buffer = new byte[compressedSize];
+ instream.Read(buffer, 0, compressedSize);
+
+ // we're filling the output from end to start, so we can't directly write the data.
+ // buffer it instead (also use this data as buffer instead of a ring-buffer for
+ // decompression)
+ byte[] outbuffer = new byte[compressedSize + headerSize + extraSize];
+
+ int currentOutSize = 0;
+ int decompressedLength = outbuffer.Length;
+ int readBytes = 0;
+ byte flags = 0, mask = 1;
+ while (currentOutSize < decompressedLength)
+ {
+ // (throws when requested new flags byte is not available)
+ #region Update the mask. If all flag bits have been read, get a new set.
+ // the current mask is the mask used in the previous run. So if it masks the
+ // last flag bit, get a new flags byte.
+ if (mask == 1)
+ {
+ if (readBytes >= compressedSize)
+ throw new NotEnoughDataException(currentOutSize, decompressedLength);
+ flags = buffer[buffer.Length - 1 - readBytes]; readBytes++;
+ mask = 0x80;
+ }
+ else
+ {
+ mask >>= 1;
+ }
+ #endregion
+
+ // bit = 1 <=> compressed.
+ if ((flags & mask) > 0)
+ {
+ // (throws when < 2 bytes are available)
+ #region Get length and displacement('disp') values from next 2 bytes
+ // there are < 2 bytes available when the end is at most 1 byte away
+ if (readBytes + 1 >= inLength)
+ {
+ throw new NotEnoughDataException(currentOutSize, decompressedLength);
+ }
+ int byte1 = buffer[compressedSize - 1 - readBytes]; readBytes++;
+ int byte2 = buffer[compressedSize - 1 - readBytes]; readBytes++;
+
+ // the number of bytes to copy
+ int length = byte1 >> 4;
+ length += 3;
+
+ // from where the bytes should be copied (relatively)
+ int disp = ((byte1 & 0x0F) << 8) | byte2;
+ disp += 3;
+
+ if (disp > currentOutSize)
+ {
+ if (currentOutSize < 2)
+ throw new InvalidDataException("Cannot go back more than already written; "
+ + "attempt to go back 0x" + disp.ToString("X") + " when only 0x"
+ + currentOutSize.ToString("X") + " bytes have been written.");
+ // HACK. this seems to produce valid files, but isn't the most elegant solution.
+ // although this _could_ be the actual way to use a disp of 2 in this format,
+ // as otherwise the minimum would be 3 (and 0 is undefined, and 1 is less useful).
+ disp = 2;
+ }
+ #endregion
+
+ int bufIdx = currentOutSize - disp;
+ for (int i = 0; i < length; i++)
+ {
+ byte next = outbuffer[outbuffer.Length - 1 - bufIdx];
+ bufIdx++;
+ outbuffer[outbuffer.Length - 1 - currentOutSize] = next;
+ currentOutSize++;
+ }
+ }
+ else
+ {
+ if (readBytes >= inLength)
+ throw new NotEnoughDataException(currentOutSize, decompressedLength);
+ byte next = buffer[buffer.Length - 1 - readBytes]; readBytes++;
+
+ outbuffer[outbuffer.Length - 1 - currentOutSize] = next;
+ currentOutSize++;
+ }
+ }
+
+ // write the decompressed data
+ outstream.Write(outbuffer, 0, outbuffer.Length);
+
+ // make sure the input is positioned at the end of the file; the stream is currently
+ // at the compression header.
+ instream.Position += headerSize;
+
+ return decompressedLength + (inLength - headerSize - compressedSize);
+ }
+ }
+ #endregion
+
+ #region Compression method; delegates to CompressNormal
+ ///
+ /// Compresses the input using the LZ-Overlay compression scheme.
+ ///
+ public override int Compress(System.IO.Stream instream, long inLength, System.IO.Stream outstream)
+ {
+ // don't bother trying to get the optimal not-compressed - compressed ratio for now.
+ // Either compress fully or don't compress (as the format cannot handle decompressed
+ // sizes that are smaller than the compressed file).
+
+ if (inLength > 0xFFFFFF)
+ throw new InputTooLargeException();
+
+ // read the input and reverse it
+ byte[] indata = new byte[inLength];
+ instream.Read(indata, 0, (int)inLength);
+ Array.Reverse(indata);
+
+ MemoryStream inMemStream = new MemoryStream(indata);
+ MemoryStream outMemStream = new MemoryStream();
+ int compressedLength = this.CompressNormal(inMemStream, inLength, outMemStream);
+
+ int totalCompFileLength = (int)outMemStream.Length + 8;
+ // make the file 4-byte aligned with padding in the header
+ if (totalCompFileLength % 4 != 0)
+ totalCompFileLength += 4 - totalCompFileLength % 4;
+
+ if (totalCompFileLength < inLength)
+ {
+ byte[] compData = outMemStream.ToArray();
+ Array.Reverse(compData);
+ outstream.Write(compData, 0, compData.Length);
+ int writtenBytes = compData.Length;
+ // there always seem to be some padding FFs. Let's pad to make the file 4-byte aligned
+ while (writtenBytes % 4 != 0)
+ {
+ outstream.WriteByte(0xFF);
+ writtenBytes++;
+ }
+
+ outstream.WriteByte((byte)((compressedLength) & 0xFF));
+ outstream.WriteByte((byte)((compressedLength >> 8) & 0xFF));
+ outstream.WriteByte((byte)((compressedLength >> 16) & 0xFF));
+
+ int headerLength = totalCompFileLength - compData.Length;
+ outstream.WriteByte((byte)headerLength);
+
+ int extraSize = (int)inLength - totalCompFileLength;
+ outstream.WriteByte((byte)((extraSize) & 0xFF));
+ outstream.WriteByte((byte)((extraSize >> 8) & 0xFF));
+ outstream.WriteByte((byte)((extraSize >> 16) & 0xFF));
+ outstream.WriteByte((byte)((extraSize >> 24) & 0xFF));
+
+ return totalCompFileLength;
+ }
+ else
+ {
+ Array.Reverse(indata);
+ outstream.Write(indata, 0, (int)inLength);
+ outstream.WriteByte(0); outstream.WriteByte(0); outstream.WriteByte(0); outstream.WriteByte(0);
+ return (int)inLength + 4;
+ }
+ }
+ #endregion
+
+ #region 'Normal' compression method. Delegates to CompressWithLA when LookAhead is set
+ ///
+ /// Compresses the given input stream with the LZ-Ovl compression, but compresses _forward_
+ /// instead of backwards.
+ ///
+ /// The input stream to compress.
+ /// The length of the input stream.
+ /// The stream to write to.
+ private unsafe int CompressNormal(Stream instream, long inLength, Stream outstream)
+ {
+ // make sure the decompressed size fits in 3 bytes.
+ // There should be room for four bytes, however I'm not 100% sure if that can be used
+ // in every game, as it may not be a built-in function.
+ if (inLength > 0xFFFFFF)
+ throw new InputTooLargeException();
+
+ // use the other method if lookahead is enabled
+ if (lookAhead)
+ {
+ return CompressWithLA(instream, inLength, outstream);
+ }
+
+ // save the input data in an array to prevent having to go back and forth in a file
+ byte[] indata = new byte[inLength];
+ int numReadBytes = instream.Read(indata, 0, (int)inLength);
+ if (numReadBytes != inLength)
+ throw new StreamTooShortException();
+
+ int compressedLength = 0;
+
+ fixed (byte* instart = &indata[0])
+ {
+ // we do need to buffer the output, as the first byte indicates which blocks are compressed.
+ // this version does not use a look-ahead, so we do not need to buffer more than 8 blocks at a time.
+ byte[] outbuffer = new byte[8 * 2 + 1];
+ outbuffer[0] = 0;
+ int bufferlength = 1, bufferedBlocks = 0;
+ int readBytes = 0;
+ while (readBytes < inLength)
+ {
+ #region If 8 blocks are buffered, write them and reset the buffer
+ // we can only buffer 8 blocks at a time.
+ if (bufferedBlocks == 8)
+ {
+ outstream.Write(outbuffer, 0, bufferlength);
+ compressedLength += bufferlength;
+ // reset the buffer
+ outbuffer[0] = 0;
+ bufferlength = 1;
+ bufferedBlocks = 0;
+ }
+ #endregion
+
+ // determine if we're dealing with a compressed or raw block.
+ // it is a compressed block when the next 3 or more bytes can be copied from
+ // somewhere in the set of already compressed bytes.
+ int disp;
+ int oldLength = Math.Min(readBytes, 0x1001);
+ int length = LZUtil.GetOccurrenceLength(instart + readBytes, (int)Math.Min(inLength - readBytes, 0x12),
+ instart + readBytes - oldLength, oldLength, out disp);
+
+ // disp = 1 cannot be stored.
+ if (disp == 1)
+ {
+ length = 1;
+ }
+ // disp = 2 cannot be saved properly. use a too large disp instead.
+ // however since I'm not sure if that's actually how that's handled, don't compress instead.
+ else if (disp == 2)
+ {
+ length = 1;
+ /*if (readBytes < 0x1001)
+ disp = readBytes + 1;
+ else
+ length = 1;/**/
+ }
+
+ // length not 3 or more? next byte is raw data
+ if (length < 3)
+ {
+ outbuffer[bufferlength++] = *(instart + (readBytes++));
+ }
+ else
+ {
+ // 3 or more bytes can be copied? next (length) bytes will be compressed into 2 bytes
+ readBytes += length;
+
+ // mark the next block as compressed
+ outbuffer[0] |= (byte)(1 << (7 - bufferedBlocks));
+
+ outbuffer[bufferlength] = (byte)(((length - 3) << 4) & 0xF0);
+ outbuffer[bufferlength] |= (byte)(((disp - 3) >> 8) & 0x0F);
+ bufferlength++;
+ outbuffer[bufferlength] = (byte)((disp - 3) & 0xFF);
+ bufferlength++;
+ }
+ bufferedBlocks++;
+ }
+
+ // copy the remaining blocks to the output
+ if (bufferedBlocks > 0)
+ {
+ outstream.Write(outbuffer, 0, bufferlength);
+ compressedLength += bufferlength;
+ /*/ make the compressed file 4-byte aligned.
+ while ((compressedLength % 4) != 0)
+ {
+ outstream.WriteByte(0);
+ compressedLength++;
+ }/**/
+ }
+ }
+
+ return compressedLength;
+ }
+ #endregion
+
+ #region Dynamic Programming compression method
+ ///
+ /// Variation of the original compression method, making use of Dynamic Programming to 'look ahead'
+ /// and determine the optimal 'length' values for the compressed blocks. Is not 100% optimal,
+ /// as the flag-bytes are not taken into account.
+ ///
+ private unsafe int CompressWithLA(Stream instream, long inLength, Stream outstream)
+ {
+ // save the input data in an array to prevent having to go back and forth in a file
+ byte[] indata = new byte[inLength];
+ int numReadBytes = instream.Read(indata, 0, (int)inLength);
+ if (numReadBytes != inLength)
+ throw new StreamTooShortException();
+
+ int compressedLength = 0;
+
+ fixed (byte* instart = &indata[0])
+ {
+ // we do need to buffer the output, as the first byte indicates which blocks are compressed.
+ // this version does not use a look-ahead, so we do not need to buffer more than 8 blocks at a time.
+ byte[] outbuffer = new byte[8 * 2 + 1];
+ outbuffer[0] = 0;
+ int bufferlength = 1, bufferedBlocks = 0;
+ int readBytes = 0;
+
+ // get the optimal choices for len and disp
+ int[] lengths, disps;
+ this.GetOptimalCompressionLengths(instart, indata.Length, out lengths, out disps);
+
+ int optCompressionLength = this.GetOptimalCompressionPartLength(lengths);
+
+ while (readBytes < optCompressionLength)
+ {
+ // we can only buffer 8 blocks at a time.
+ if (bufferedBlocks == 8)
+ {
+ outstream.Write(outbuffer, 0, bufferlength);
+ compressedLength += bufferlength;
+ // reset the buffer
+ outbuffer[0] = 0;
+ bufferlength = 1;
+ bufferedBlocks = 0;
+ }
+
+
+ if (lengths[readBytes] == 1)
+ {
+ outbuffer[bufferlength++] = *(instart + (readBytes++));
+ }
+ else
+ {
+ // mark the next block as compressed
+ outbuffer[0] |= (byte)(1 << (7 - bufferedBlocks));
+
+ outbuffer[bufferlength] = (byte)(((lengths[readBytes] - 3) << 4) & 0xF0);
+ outbuffer[bufferlength] |= (byte)(((disps[readBytes] - 3) >> 8) & 0x0F);
+ bufferlength++;
+ outbuffer[bufferlength] = (byte)((disps[readBytes] - 3) & 0xFF);
+ bufferlength++;
+
+ readBytes += lengths[readBytes];
+ }
+
+ bufferedBlocks++;
+ }
+
+ // copy the remaining blocks to the output
+ if (bufferedBlocks > 0)
+ {
+ outstream.Write(outbuffer, 0, bufferlength);
+ compressedLength += bufferlength;
+ }
+
+ while (readBytes < inLength)
+ outstream.WriteByte(*(instart + (readBytes++)));
+ }
+
+ return compressedLength;
+ }
+ #endregion
+
+ #region DP compression helper method; GetOptimalCompressionLengths
+ ///
+ /// Gets the optimal compression lengths for each start of a compressed block using Dynamic Programming.
+ /// This takes O(n^2) time.
+ ///
+ /// The data to compress.
+ /// The length of the data to compress.
+ /// The optimal 'length' of the compressed blocks. For each byte in the input data,
+ /// this value is the optimal 'length' value. If it is 1, the block should not be compressed.
+ /// The 'disp' values of the compressed blocks. May be less than 3, in which case the
+ /// corresponding length will never be anything other than 1.
+ private unsafe void GetOptimalCompressionLengths(byte* indata, int inLength, out int[] lengths, out int[] disps)
+ {
+ lengths = new int[inLength];
+ disps = new int[inLength];
+ int[] minLengths = new int[inLength];
+
+ for (int i = inLength - 1; i >= 0; i--)
+ {
+ // first get the compression length when the next byte is not compressed
+ minLengths[i] = int.MaxValue;
+ lengths[i] = 1;
+ if (i + 1 >= inLength)
+ minLengths[i] = 1;
+ else
+ minLengths[i] = 1 + minLengths[i + 1];
+ // then the optimal compressed length
+ int oldLength = Math.Min(0x1001, i);
+ // get the appropriate disp while at it. Takes at most O(n) time if oldLength is considered O(n)
+ // be sure to bound the input length with 0x12, as that's the maximum length for LZ-Ovl compressed blocks.
+ int maxLen = LZUtil.GetOccurrenceLength(indata + i, Math.Min(inLength - i, 0x12),
+ indata + i - oldLength, oldLength, out disps[i]);
+ if (disps[i] > i)
+ throw new Exception("disp is too large");
+ // disp < 3 cannot be stored explicitly.
+ if (disps[i] < 3)
+ maxLen = 1;
+ for (int j = 3; j <= maxLen; j++)
+ {
+ int newCompLen;
+ if (i + j >= inLength)
+ newCompLen = 2;
+ else
+ newCompLen = 2 + minLengths[i + j];
+ if (newCompLen < minLengths[i])
+ {
+ lengths[i] = j;
+ minLengths[i] = newCompLen;
+ }
+ }
+ }
+
+ // we could optimize this further to also optimize it with regard to the flag-bytes, but that would require 8 times
+ // more space and time (one for each position in the block) for only a potentially tiny increase in compression ratio.
+ }
+ #endregion
+
+ #region DP compression helper method: GetOptimalCompressionPartLength
+ ///
+ /// Gets the 'optimal' length of the compressed part of the file.
+ /// Or rather: the length in such a way that compressing any more will not
+ /// result in a shorter file.
+ ///
+ /// The lengths of the compressed blocks, as gotten from GetOptimalCompressionLengths.
+ /// The 'optimal' length of the compressed part of the file.
+ private int GetOptimalCompressionPartLength(int[] blocklengths)
+ {
+ // first determine the actual total compressed length using the optimal compression.
+ int block8Idx = 0;
+ int insideBlockIdx = 0;
+ int totalCompLength = 0;
+ for (int i = 0; i < blocklengths.Length; )
+ {
+ if (insideBlockIdx == 8)
+ {
+ block8Idx++;
+ insideBlockIdx = 0;
+ totalCompLength++;
+ }
+ insideBlockIdx++;
+
+ if (blocklengths[i] >= 3)
+ totalCompLength += 2;
+ else
+ totalCompLength++;
+ i += blocklengths[i];
+ }
+
+ int[] actualRestCompLengths = new int[blocklengths.Length];
+ block8Idx = 0;
+ insideBlockIdx = 0;
+ for (int i = 0; i < blocklengths.Length; )
+ {
+ if (insideBlockIdx == 8)
+ {
+ block8Idx++;
+ insideBlockIdx = 0;
+ totalCompLength--;
+ }
+ if (blocklengths[i] >= 3)
+ totalCompLength -= 2;
+ else
+ totalCompLength--;
+ actualRestCompLengths[i] = totalCompLength;
+ i += blocklengths[i];
+ insideBlockIdx++;
+
+ if (totalCompLength > (blocklengths.Length - i))
+ return i;
+ }
+ return blocklengths.Length;
+ }
+ #endregion
+ }
+}
diff --git a/CSharp/DSDecmp/Formats/Nitro/CompositeFormats.cs b/CSharp/DSDecmp/Formats/Nitro/CompositeFormats.cs
new file mode 100644
index 0000000..9d32f04
--- /dev/null
+++ b/CSharp/DSDecmp/Formats/Nitro/CompositeFormats.cs
@@ -0,0 +1,94 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace DSDecmp.Formats.Nitro
+{
+ ///
+ /// A composite format with all formats supported natively by the GBA.
+ ///
+ public class CompositeGBAFormat : CompositeFormat
+ {
+ ///
+ /// Creates a new instance of the format composed of all native GBA compression formats.
+ ///
+ public CompositeGBAFormat()
+ : base(new Huffman4(), new Huffman8(), new LZ10()) { }
+
+ ///
+ /// Gets a short string identifying this compression format.
+ ///
+ public override string ShortFormatString
+ {
+ get { return "GBA"; }
+ }
+
+ ///
+ /// Gets a short description of this compression format (used in the program usage).
+ ///
+ public override string Description
+ {
+ get { return "All formats natively supported by the GBA."; }
+ }
+
+ ///
+ /// Gets if this format supports compressing a file.
+ ///
+ public override bool SupportsCompression
+ {
+ get { return true; }
+ }
+
+ ///
+ /// Gets the value that must be given on the command line in order to compress using this format.
+ ///
+ public override string CompressionFlag
+ {
+ get { return "gba*"; }
+ }
+ }
+
+ ///
+ /// A composite format with all formats supported natively by the NDS (but not LZ-Overlay)
+ ///
+ public class CompositeNDSFormat : CompositeFormat
+ {
+ ///
+ /// Creates a new instance of the format composed of all native NDS compression formats.
+ ///
+ public CompositeNDSFormat()
+ : base(new Huffman4(), new Huffman8(), new LZ10(), new LZ11()) { }
+
+ ///
+ /// Gets a short string identifying this compression format.
+ ///
+ public override string ShortFormatString
+ {
+ get { return "NDS"; }
+ }
+
+ ///
+ /// Gets a short description of this compression format (used in the program usage).
+ ///
+ public override string Description
+ {
+ get { return "All formats natively supported by the NDS."; }
+ }
+
+ ///
+ /// Gets if this format supports compressing a file.
+ ///
+ public override bool SupportsCompression
+ {
+ get { return true; }
+ }
+
+ ///
+ /// Gets the value that must be given on the command line in order to compress using this format.
+ ///
+ public override string CompressionFlag
+ {
+ get { return "nds*"; }
+ }
+ }
+}
diff --git a/CSharp/DSDecmp/Formats/Nitro/Huffman.cs b/CSharp/DSDecmp/Formats/Nitro/Huffman.cs
new file mode 100644
index 0000000..f5d2e67
--- /dev/null
+++ b/CSharp/DSDecmp/Formats/Nitro/Huffman.cs
@@ -0,0 +1,1070 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+
+namespace DSDecmp.Formats.Nitro
+{
+ ///
+ /// Compressor and decompressor for the Huffman format used in many of the games for the
+ /// newer Nintendo consoles and handhelds.
+ ///
+ public abstract class Huffman : NitroCFormat
+ {
+ #region Enum: BlockSize
+ ///
+ /// The possible data sizes used in Huffman compression formats on the GBA/NDS.
+ ///
+ public enum BlockSize : byte
+ {
+ ///
+ /// Each data block is four bits long.
+ ///
+ FOURBIT = 0x24,
+ ///
+ /// Each data block is eight bits long.
+ ///
+ EIGHTBIT = 0x28
+ }
+ #endregion
+
+ ///
+ /// Sets the block size used when using the Huffman format to compress.
+ ///
+ public BlockSize CompressBlockSize { get; set; }
+
+ ///
+ /// Gets if this format supports compression. Always returns true.
+ ///
+ public override bool SupportsCompression
+ {
+ get { return true; }
+ }
+
+
+ #region Internal Constructor(BlockSize)
+ ///
+ /// Creates a new generic instance of the Huffman compression format.
+ ///
+ /// The block size used.
+ internal Huffman(BlockSize blockSize)
+ : base((byte)blockSize)
+ {
+ this.CompressBlockSize = blockSize;
+ }
+ #endregion
+
+
        #region Decompression method
        /// <summary>
        /// Decompresses the given stream, writing the decompressed data to the given output stream.
        /// Assumes Supports(instream) returns true.
        /// After this call, the input stream will be positioned at the end of the compressed stream,
        /// or at the initial position + inLength, whichever comes first.
        /// </summary>
        /// <param name="instream">The stream to decompress. At the end of this method, the position
        /// of this stream is directly after the compressed data.</param>
        /// <param name="inLength">The length of the input data. Not necessarily all of the
        /// input data may be read (if there is padding, for example), however never more than
        /// this number of bytes is read from the input stream.</param>
        /// <param name="outstream">The stream to write the decompressed data to.</param>
        /// <returns>The length of the output data.</returns>
        /// <exception cref="NotEnoughDataException">When the given length of the input data
        /// is not enough to properly decompress the input.</exception>
        public override long Decompress(Stream instream, long inLength, Stream outstream)
        {
            #region GBATEK format specification
            /*
                Data Header (32bit)
                    Bit0-3   Data size in bit units (normally 4 or 8)
                    Bit4-7   Compressed type (must be 2 for Huffman)
                    Bit8-31  24bit size of decompressed data in bytes
                Tree Size (8bit)
                    Bit0-7   Size of Tree Table/2-1 (ie. Offset to Compressed Bitstream)
                Tree Table (list of 8bit nodes, starting with the root node)
                    Root Node and Non-Data-Child Nodes are:
                      Bit0-5   Offset to next child node,
                               Next child node0 is at (CurrentAddr AND NOT 1)+Offset*2+2
                               Next child node1 is at (CurrentAddr AND NOT 1)+Offset*2+2+1
                      Bit6     Node1 End Flag (1=Next child node is data)
                      Bit7     Node0 End Flag (1=Next child node is data)
                    Data nodes are (when End Flag was set in parent node):
                      Bit0-7   Data (upper bits should be zero if Data Size is less than 8)
                Compressed Bitstream (stored in units of 32bits)
                    Bit0-31  Node Bits (Bit31=First Bit)  (0=Node0, 1=Node1)
            */
            #endregion

            long readBytes = 0;

            // the first header byte is the magic/type byte; for this format it equals the block size value.
            byte type = (byte)instream.ReadByte();
            if (type != (byte)this.CompressBlockSize)
                throw new InvalidDataException("The provided stream is not a valid Huffman "
                    + "compressed stream (invalid type 0x" + type.ToString("X") + "); unknown block size.");
            // the next three header bytes hold the 24-bit little-endian decompressed size.
            byte[] sizeBytes = new byte[3];
            instream.Read(sizeBytes, 0, 3);
            int decompressedSize = IOUtils.ToNDSu24(sizeBytes, 0);
            readBytes += 4;
            // a 24-bit size of 0 means the real size follows as an extra 32-bit field.
            if (decompressedSize == 0)
            {
                sizeBytes = new byte[4];
                instream.Read(sizeBytes, 0, 4);
                decompressedSize = IOUtils.ToNDSs32(sizeBytes, 0);
                readBytes += 4;
            }

            #region Read the Huff-tree

            if (readBytes >= inLength)
                throw new NotEnoughDataException(0, decompressedSize);
            // the stored byte is (Size of Tree Table / 2) - 1.
            int treeSize = instream.ReadByte(); readBytes++;
            if (treeSize < 0)
                throw new InvalidDataException("The stream is too short to contain a Huffman tree.");

            treeSize = (treeSize + 1) * 2;

            if (readBytes + treeSize >= inLength)
                throw new InvalidDataException("The Huffman tree is too large for the given input stream.");

            // the tree region starts at the tree-size byte itself; the bitstream follows directly after it.
            long treeEnd = (instream.Position - 1) + treeSize;

            // the relative offset may be 4 more (when the initial decompressed size is 0), but
            // since it's relative that doesn't matter, especially when it only matters if
            // the given value is odd or even. (5 = 4 header bytes + the tree-size byte.)
            HuffTreeNode rootNode = new HuffTreeNode(instream, false, 5, treeEnd);

            readBytes += treeSize;
            // re-position the stream after the tree (the stream is currently positioned after the root
            // node, which is located at the start of the tree definition)
            instream.Position = treeEnd;

            #endregion

            // the current u32 we are reading bits from.
            uint data = 0;
            // the amount of bits left to read from the current u32.
            byte bitsLeft = 0;

            // a cache used for writing when the block size is four bits
            int cachedByte = -1;

            // the current output size
            int currentSize = 0;
            HuffTreeNode currentNode = rootNode;
            byte[] buffer = new byte[4];

            while (currentSize < decompressedSize)
            {
                #region find the next reference to a data node
                while (!currentNode.IsData)
                {
                    // if there are no bits left to read in the data, get a new 32-bit block from the input
                    if (bitsLeft == 0)
                    {
                        if (readBytes >= inLength)
                            throw new NotEnoughDataException(currentSize, decompressedSize);
                        int nRead = instream.Read(buffer, 0, 4);
                        if (nRead < 4)
                            throw new StreamTooShortException();
                        readBytes += nRead;
                        data = IOUtils.ToNDSu32(buffer, 0);
                        bitsLeft = 32;
                    }
                    // get the next bit (most significant bit of the block first)
                    bitsLeft--;
                    bool nextIsOne = (data & (1 << bitsLeft)) != 0;
                    // go to the next node, the direction of the child depending on the value of the current/next bit
                    currentNode = nextIsOne ? currentNode.Child1 : currentNode.Child0;
                }
                #endregion

                #region write the data in the current node (when possible)
                switch (this.CompressBlockSize)
                {
                    case BlockSize.EIGHTBIT:
                        {
                            // just copy the data if the block size is a full byte
                            outstream.WriteByte(currentNode.Data);
                            currentSize++;
                            break;
                        }
                    case BlockSize.FOURBIT:
                        {
                            // cache the first half of the data if the block size is a half byte;
                            // the first decoded nibble ends up in the high half of the output byte.
                            if (cachedByte < 0)
                            {
                                cachedByte = currentNode.Data << 4;
                            }
                            else
                            {
                                // if we already cached a half-byte, combine the two halves and write the full byte.
                                cachedByte |= currentNode.Data;
                                outstream.WriteByte((byte)cachedByte);
                                currentSize++;
                                // be sure to forget the two written half-bytes
                                cachedByte = -1;
                            }
                            break;
                        }
                    default:
                        throw new Exception("Unknown block size " + this.CompressBlockSize.ToString());
                }
                #endregion

                outstream.Flush();

                // make sure to start over at the root for the next codeword
                currentNode = rootNode;
            }

            // the data is 4-byte aligned. Although very unlikely in this case (compressed bit blocks
            // are always 4 bytes long, and the tree size is generally 4-byte aligned as well),
            // skip any padding due to alignment.
            if (readBytes % 4 != 0)
                readBytes += 4 - (readBytes % 4);

            // signal input that was longer than what the decompression actually consumed.
            if (readBytes < inLength)
            {
                throw new TooMuchInputException(readBytes, inLength);
            }

            return decompressedSize;
        }
        #endregion
+
+ #region Utility method: GetLowest(leafQueue, nodeQueue, out prio)
+ ///
+ /// Gets the tree node with the lowest priority (frequency) from the leaf and node queues.
+ /// If the priority is the same for both head items in the queues, the node from the leaf queue is picked.
+ ///
+ protected HuffTreeNode GetLowest(SimpleReversedPrioQueue leafQueue, SimpleReversedPrioQueue nodeQueue, out int prio)
+ {
+ if (leafQueue.Count == 0)
+ return nodeQueue.Dequeue(out prio);
+ else if (nodeQueue.Count == 0)
+ return leafQueue.Dequeue(out prio);
+ else
+ {
+ int leafPrio, nodePrio;
+ leafQueue.Peek(out leafPrio);
+ nodeQueue.Peek(out nodePrio);
+ // pick a node from the leaf queue when the priorities are equal.
+ if (leafPrio <= nodePrio)
+ return leafQueue.Dequeue(out prio);
+ else
+ return nodeQueue.Dequeue(out prio);
+ }
+ }
+ #endregion
+
        #region Utility class: HuffTreeNode
        /// <summary>
        /// A single node in a Huffman tree: either an internal node with two children,
        /// or a leaf ('data') node carrying a decoded value.
        /// </summary>
        public class HuffTreeNode
        {
            #region Fields & Properties: Data & IsData
            /// <summary>
            /// The data contained in this node. May not mean anything when isData == false.
            /// </summary>
            private byte data;
            /// <summary>
            /// A flag indicating if this node has been filled (ie: was actually present inside
            /// the bounds of the serialized tree definition).
            /// </summary>
            private bool isFilled;
            /// <summary>
            /// The data contained in this node. May not mean anything when isData == false.
            /// Throws a NullReferenceException when this node has not been defined (ie: reference was outside the
            /// bounds of the tree definition).
            /// </summary>
            public byte Data
            {
                get
                {
                    if (!this.isFilled) throw new NullReferenceException("Reference to an undefined node in the huffman tree.");
                    return this.data;
                }
            }
            /// <summary>
            /// A flag indicating if this node contains data. If not, this is not a leaf node.
            /// </summary>
            private bool isData;
            /// <summary>
            /// Returns true if this node represents data (ie: is a leaf node).
            /// </summary>
            public bool IsData { get { return this.isData; } }
            #endregion

            #region Field & Properties: Children & Parent
            /// <summary>
            /// The child of this node at side 0.
            /// </summary>
            private HuffTreeNode child0;
            /// <summary>
            /// The child of this node at side 0.
            /// </summary>
            public HuffTreeNode Child0 { get { return this.child0; } }
            /// <summary>
            /// The child of this node at side 1.
            /// </summary>
            private HuffTreeNode child1;
            /// <summary>
            /// The child of this node at side 1.
            /// </summary>
            public HuffTreeNode Child1 { get { return this.child1; } }
            /// <summary>
            /// The parent node of this node.
            /// </summary>
            public HuffTreeNode Parent { get; private set; }
            /// <summary>
            /// Determines if this is the Child0 of the parent node. Assumes there is a parent.
            /// </summary>
            public bool IsChild0 { get { return this.Parent.child0 == this; } }
            /// <summary>
            /// Determines if this is the Child1 of the parent node. Assumes there is a parent.
            /// </summary>
            public bool IsChild1 { get { return this.Parent.child1 == this; } }
            #endregion

            #region Field & Property: Depth
            private int depth;
            /// <summary>
            /// Get or set the depth of this node. Will not be set automatically, but
            /// will be set recursively (the depth of all child nodes will be updated when this is set).
            /// For a leaf, the depth equals the length of its codeword.
            /// </summary>
            public int Depth
            {
                get { return this.depth; }
                set
                {
                    this.depth = value;
                    // recursively set the depth of the child nodes.
                    if (!this.isData)
                    {
                        this.child0.Depth = this.depth + 1;
                        this.child1.Depth = this.depth + 1;
                    }
                }
            }
            #endregion

            #region Property: Size
            /// <summary>
            /// Calculates the size (number of nodes) of the sub-tree with this node as root.
            /// </summary>
            public int Size
            {
                get
                {
                    if (this.IsData)
                        return 1;
                    return 1 + this.child0.Size + this.child1.Size;
                }
            }
            #endregion

            /// <summary>
            /// The index of this node in the array for building the proper ordering.
            /// If -1, this node has not yet been placed in the array.
            /// </summary>
            internal int index = -1;

            #region Constructor(data, isData, child0, child1)
            /// <summary>
            /// Manually creates a new node for a huffman tree.
            /// </summary>
            /// <param name="data">The data for this node.</param>
            /// <param name="isData">If this node represents data.</param>
            /// <param name="child0">The child of this node on the 0 side.</param>
            /// <param name="child1">The child of this node on the 1 side.</param>
            public HuffTreeNode(byte data, bool isData, HuffTreeNode child0, HuffTreeNode child1)
            {
                this.data = data;
                this.isData = isData;
                this.child0 = child0;
                this.child1 = child1;
                this.isFilled = true;
                // wire up the parent back-references for internal nodes.
                if (!isData)
                {
                    this.child0.Parent = this;
                    this.child1.Parent = this;
                }
            }
            #endregion

            #region Constructor(Stream, isData, relOffset, maxStreamPos)
            /// <summary>
            /// Creates a new node in the Huffman tree by reading it — and, recursively,
            /// its children — from a serialized tree table.
            /// </summary>
            /// <param name="stream">The stream to read from. It is assumed that there is (at least)
            /// one more byte available to read.</param>
            /// <param name="isData">If this node is a data-node.</param>
            /// <param name="relOffset">The offset of this node in the source data, relative to the start
            /// of the compressed file.</param>
            /// <param name="maxStreamPos">The indicated end of the huffman tree. If the stream is past
            /// this position, the tree is invalid.</param>
            public HuffTreeNode(Stream stream, bool isData, long relOffset, long maxStreamPos)
            {
                /*
                 Tree Table (list of 8bit nodes, starting with the root node)
                    Root Node and Non-Data-Child Nodes are:
                      Bit0-5   Offset to next child node,
                               Next child node0 is at (CurrentAddr AND NOT 1)+Offset*2+2
                               Next child node1 is at (CurrentAddr AND NOT 1)+Offset*2+2+1
                      Bit6     Node1 End Flag (1=Next child node is data)
                      Bit7     Node0 End Flag (1=Next child node is data)
                    Data nodes are (when End Flag was set in parent node):
                      Bit0-7   Data (upper bits should be zero if Data Size is less than 8)
                 */

                if (stream.Position >= maxStreamPos)
                {
                    // this happens when part of the tree is unused; leave the node undefined.
                    this.isFilled = false;
                    return;
                }
                this.isFilled = true;
                int readData = stream.ReadByte();
                if (readData < 0)
                    throw new StreamTooShortException();
                this.data = (byte)readData;

                this.isData = isData;

                // for internal nodes, decode the child offset and end flags, then read both children.
                if (!this.isData)
                {
                    int offset = this.data & 0x3F;
                    bool zeroIsData = (this.data & 0x80) > 0;
                    bool oneIsData = (this.data & 0x40) > 0;

                    // off AND NOT 1 == off XOR (off AND 1)
                    long zeroRelOffset = (relOffset ^ (relOffset & 1)) + offset * 2 + 2;

                    long currStreamPos = stream.Position;
                    // position the stream right before the 0-node
                    stream.Position += (zeroRelOffset - relOffset) - 1;
                    // read the 0-node
                    this.child0 = new HuffTreeNode(stream, zeroIsData, zeroRelOffset, maxStreamPos);
                    this.child0.Parent = this;
                    // the 1-node is directly behind the 0-node
                    this.child1 = new HuffTreeNode(stream, oneIsData, zeroRelOffset + 1, maxStreamPos);
                    this.child1.Parent = this;

                    // reset the stream position to right behind this node's data
                    stream.Position = currStreamPos;
                }
            }
            #endregion

            /// <summary>
            /// Generates and returns a string-representation of the huffman tree starting at this node.
            /// </summary>
            public override string ToString()
            {
                if (this.isData)
                {
                    return "<" + this.data.ToString("X2") + ">";
                }
                else
                {
                    return "[" + this.child0.ToString() + "," + this.child1.ToString() + "]";
                }
            }

        }
        #endregion
+ }
+
+ ///
+ /// The Huffman compression scheme using 4-bit data blocks.
+ ///
+ public sealed class Huffman4 : Huffman
+ {
+ ///
+ /// Gets a short string identifying this compression format.
+ ///
+ public override string ShortFormatString
+ {
+ get { return "Huffman-4"; }
+ }
+
+ ///
+ /// Gets a short description of this compression format.
+ ///
+ public override string Description
+ {
+ get { return "Huffman compression scheme using 4-bit datablocks."; }
+ }
+
+ ///
+ /// Gets the value that must be given on the command line in order to compress using this format.
+ ///
+ public override string CompressionFlag
+ {
+ get { return "huff4"; }
+ }
+
+ ///
+ /// Creates a new instance of the 4-bit Huffman compression format.
+ ///
+ public Huffman4()
+ : base(BlockSize.FOURBIT) { }
+
+ #region 4-bit block size Compression method
+ ///
+ /// Applies Huffman compression with a datablock size of 4 bits.
+ ///
+ /// The stream to compress.
+ /// The length of the input stream.
+ /// The stream to write the decompressed data to.
+ /// The size of the decompressed data.
+ public override int Compress(Stream instream, long inLength, Stream outstream)
+ {
+ if (inLength > 0xFFFFFF)
+ throw new InputTooLargeException();
+
+ // cache the input, as we need to build a frequency table
+ byte[] inputData = new byte[inLength];
+ instream.Read(inputData, 0, (int)inLength);
+
+ // build that frequency table.
+ int[] frequencies = new int[0x10];
+ for (int i = 0; i < inLength; i++)
+ {
+ frequencies[inputData[i] & 0xF]++;
+ frequencies[(inputData[i] >> 4) & 0xF]++;
+ }
+
+ #region Build the Huffman tree
+
+ SimpleReversedPrioQueue leafQueue = new SimpleReversedPrioQueue();
+ SimpleReversedPrioQueue nodeQueue = new SimpleReversedPrioQueue();
+ int nodeCount = 0;
+ // make all leaf nodes, and put them in the leaf queue. Also save them for later use.
+ HuffTreeNode[] leaves = new HuffTreeNode[0x10];
+ for (int i = 0; i < 0x10; i++)
+ {
+ // there is no need to store leaves that are not used
+ if (frequencies[i] == 0)
+ continue;
+ HuffTreeNode node = new HuffTreeNode((byte)i, true, null, null);
+ leaves[i] = node;
+ leafQueue.Enqueue(frequencies[i], node);
+ nodeCount++;
+ }
+
+ while (leafQueue.Count + nodeQueue.Count > 1)
+ {
+ // get the two nodes with the lowest priority.
+ HuffTreeNode one = null, two = null;
+ int onePrio, twoPrio;
+ one = GetLowest(leafQueue, nodeQueue, out onePrio);
+ two = GetLowest(leafQueue, nodeQueue, out twoPrio);
+
+ // give those two a common parent, and put that node in the node queue
+ HuffTreeNode newNode = new HuffTreeNode(0, false, one, two);
+ nodeQueue.Enqueue(onePrio + twoPrio, newNode);
+ nodeCount++;
+ }
+ int rootPrio;
+ HuffTreeNode root = nodeQueue.Dequeue(out rootPrio);
+ // set the depth of all nodes in the tree, such that we know for each leaf how long
+ // its codeword is.
+ root.Depth = 0;
+
+ #endregion
+
+ // now that we have a tree, we can write that tree and follow with the data.
+
+ // write the compression header first
+ outstream.WriteByte((byte)BlockSize.FOURBIT); // this is block size 4 only
+ outstream.WriteByte((byte)(inLength & 0xFF));
+ outstream.WriteByte((byte)((inLength >> 8) & 0xFF));
+ outstream.WriteByte((byte)((inLength >> 16) & 0xFF));
+
+ int compressedLength = 4;
+
+ #region write the tree
+
+ outstream.WriteByte((byte)((nodeCount - 1) / 2));
+ compressedLength++;
+
+ // use a breadth-first traversal to store the tree, such that we do not need to store/calculate the side of each sub-tree.
+ // because the data is only 4 bits long, no tree will ever let the offset field overflow.
+ LinkedList printQueue = new LinkedList();
+ printQueue.AddLast(root);
+ while (printQueue.Count > 0)
+ {
+ HuffTreeNode node = printQueue.First.Value;
+ printQueue.RemoveFirst();
+ if (node.IsData)
+ {
+ outstream.WriteByte(node.Data);
+ }
+ else
+ {
+ // bits 0-5: 'offset' = # nodes in queue left
+ // bit 6: node1 end flag
+ // bit 7: node0 end flag
+ byte data = (byte)(printQueue.Count / 2);
+ if (data > 0x3F)
+ throw new InvalidDataException("BUG: offset overflow in 4-bit huffman.");
+ data = (byte)(data & 0x3F);
+ if (node.Child0.IsData)
+ data |= 0x80;
+ if (node.Child1.IsData)
+ data |= 0x40;
+ outstream.WriteByte(data);
+
+ printQueue.AddLast(node.Child0);
+ printQueue.AddLast(node.Child1);
+ }
+ compressedLength++;
+ }
+
+ #endregion
+
+ #region write the data
+
+ // the codewords are stored in blocks of 32 bits
+ uint datablock = 0;
+ byte bitsLeftToWrite = 32;
+
+ for (int i = 0; i < inLength; i++)
+ {
+ byte data = inputData[i];
+
+ for (int j = 0; j < 2; j++)
+ {
+ HuffTreeNode node = leaves[(data >> (4 - j * 4)) & 0xF];
+ // the depth of the node is the length of the codeword required to encode the byte
+ int depth = node.Depth;
+ bool[] path = new bool[depth];
+ for (int d = 0; d < depth; d++)
+ {
+ path[depth - d - 1] = node.IsChild1;
+ node = node.Parent;
+ }
+ for (int d = 0; d < depth; d++)
+ {
+ if (bitsLeftToWrite == 0)
+ {
+ outstream.Write(IOUtils.FromNDSu32(datablock), 0, 4);
+ compressedLength += 4;
+ datablock = 0;
+ bitsLeftToWrite = 32;
+ }
+ bitsLeftToWrite--;
+ if (path[d])
+ datablock |= (uint)(1 << bitsLeftToWrite);
+ // no need to OR the buffer with 0 if it is child0
+ }
+
+ }
+ }
+
+ // write the partly filled data block as well
+ if (bitsLeftToWrite != 32)
+ {
+ outstream.Write(IOUtils.FromNDSu32(datablock), 0, 4);
+ compressedLength += 4;
+ }
+
+ #endregion
+
+ return compressedLength;
+ }
+ #endregion
+ }
+
+ ///
+ /// The Huffman compression scheme using 8-bit data blocks.
+ ///
+ public sealed class Huffman8 : Huffman
+ {
+ ///
+ /// Gets a short string identifying this compression format.
+ ///
+ public override string ShortFormatString
+ {
+ get { return "Huffman-8"; }
+ }
+
+ ///
+ /// Gets a short description of this compression format.
+ ///
+ public override string Description
+ {
+ get { return "Huffman compression scheme using 8-bit datablocks."; }
+ }
+
+ ///
+ /// Gets the value that must be given on the command line in order to compress using this format.
+ ///
+ public override string CompressionFlag
+ {
+ get { return "huff8"; }
+ }
+
+ ///
+ /// Creates a new instance of the 4-bit Huffman compression format.
+ ///
+ public Huffman8()
+ : base(BlockSize.EIGHTBIT) { }
+
+ #region 8-bit block size Compression method
+ ///
+ /// Applies Huffman compression with a datablock size of 8 bits.
+ ///
+ /// The stream to compress.
+ /// The length of the input stream.
+ /// The stream to write the decompressed data to.
+ /// The size of the decompressed data.
+ public override int Compress(Stream instream, long inLength, Stream outstream)
+ {
+ if (inLength > 0xFFFFFF)
+ throw new InputTooLargeException();
+
+ // cache the input, as we need to build a frequency table
+ byte[] inputData = new byte[inLength];
+ instream.Read(inputData, 0, (int)inLength);
+
+ // build that frequency table.
+ int[] frequencies = new int[0x100];
+ for (int i = 0; i < inLength; i++)
+ frequencies[inputData[i]]++;
+
+ #region Build the Huffman tree
+
+ SimpleReversedPrioQueue leafQueue = new SimpleReversedPrioQueue();
+ SimpleReversedPrioQueue nodeQueue = new SimpleReversedPrioQueue();
+ int nodeCount = 0;
+ // make all leaf nodes, and put them in the leaf queue. Also save them for later use.
+ HuffTreeNode[] leaves = new HuffTreeNode[0x100];
+ for (int i = 0; i < 0x100; i++)
+ {
+ // there is no need to store leaves that are not used
+ if (frequencies[i] == 0)
+ continue;
+ HuffTreeNode node = new HuffTreeNode((byte)i, true, null, null);
+ leaves[i] = node;
+ leafQueue.Enqueue(frequencies[i], node);
+ nodeCount++;
+ }
+
+ while (leafQueue.Count + nodeQueue.Count > 1)
+ {
+ // get the two nodes with the lowest priority.
+ HuffTreeNode one = null, two = null;
+ int onePrio, twoPrio;
+ one = GetLowest(leafQueue, nodeQueue, out onePrio);
+ two = GetLowest(leafQueue, nodeQueue, out twoPrio);
+
+ // give those two a common parent, and put that node in the node queue
+ HuffTreeNode newNode = new HuffTreeNode(0, false, one, two);
+ nodeQueue.Enqueue(onePrio + twoPrio, newNode);
+ nodeCount++;
+ }
+ int rootPrio;
+ HuffTreeNode root = nodeQueue.Dequeue(out rootPrio);
+ // set the depth of all nodes in the tree, such that we know for each leaf how long
+ // its codeword is.
+ root.Depth = 0;
+
+ #endregion
+
+ // now that we have a tree, we can write that tree and follow with the data.
+
+ // write the compression header first
+ outstream.WriteByte((byte)BlockSize.EIGHTBIT); // this is block size 8 only
+ outstream.WriteByte((byte)(inLength & 0xFF));
+ outstream.WriteByte((byte)((inLength >> 8) & 0xFF));
+ outstream.WriteByte((byte)((inLength >> 16) & 0xFF));
+
+ int compressedLength = 4;
+
+ #region write the tree
+
+ outstream.WriteByte((byte)((nodeCount - 1) / 2));
+ compressedLength++;
+
+ // use a breadth-first traversal to store the tree, such that we do not need to store/calculate the size of each sub-tree.
+ // NO! BF results in an ordering that may overflow the offset field.
+
+ // find the BF order of all nodes that have two leaves as children. We're going to insert them in an array in reverse BF order,
+ // inserting the parent whenever both children have been inserted.
+
+ LinkedList leafStemQueue = new LinkedList();
+
+ #region fill the leaf queue; first->last will be reverse BF
+ LinkedList nodeCodeStack = new LinkedList();
+ nodeCodeStack.AddLast(root);
+ while (nodeCodeStack.Count > 0)
+ {
+ HuffTreeNode node = nodeCodeStack.First.Value;
+ nodeCodeStack.RemoveFirst();
+ if (node.IsData)
+ continue;
+ if (node.Child0.IsData && node.Child1.IsData)
+ {
+ leafStemQueue.AddFirst(node);
+ }
+ else
+ {
+ nodeCodeStack.AddLast(node.Child0);
+ nodeCodeStack.AddLast(node.Child1);
+ }
+
+ }
+ #endregion
+
+ HuffTreeNode[] nodeArray = new HuffTreeNode[0x1FF]; // this array does not contain the leaves themselves!
+ while (leafStemQueue.Count > 0)
+ {
+ Insert(leafStemQueue.First.Value, nodeArray, 0x3F + 1);
+ leafStemQueue.RemoveFirst();
+ }
+
+ // update the indices to ignore all gaps
+ int nodeIndex = 0;
+ for (int i = 0; i < nodeArray.Length; i++)
+ {
+ if (nodeArray[i] != null)
+ nodeArray[i].index = nodeIndex++;
+ }
+
+ // write the nodes in their given order. However when 'writing' a node, write the data of its children instead.
+ // the root node is always the first node.
+ byte rootData = 0;
+ if (root.Child0.IsData)
+ rootData |= 0x80;
+ if (root.Child1.IsData)
+ rootData |= 0x40;
+ outstream.WriteByte(rootData); compressedLength++;
+
+ for (int i = 0; i < nodeArray.Length; i++)
+ {
+ if (nodeArray[i] != null)
+ {
+ // nodes in this array are never data!
+ HuffTreeNode node0 = nodeArray[i].Child0;
+ if (node0.IsData)
+ outstream.WriteByte(node0.Data);
+ else
+ {
+ int offset = node0.index - nodeArray[i].index - 1;
+ if (offset > 0x3F)
+ throw new Exception("Offset overflow!");
+ byte data = (byte)offset;
+ if (node0.Child0.IsData)
+ data |= 0x80;
+ if (node0.Child1.IsData)
+ data |= 0x40;
+ outstream.WriteByte(data);
+ }
+
+ HuffTreeNode node1 = nodeArray[i].Child1;
+ if (node1.IsData)
+ outstream.WriteByte(node1.Data);
+ else
+ {
+ int offset = node1.index - nodeArray[i].index - 1;
+ if (offset > 0x3F)
+ throw new Exception("Offset overflow!");
+ byte data = (byte)offset;
+ if (node0.Child0.IsData)
+ data |= 0x80;
+ if (node0.Child1.IsData)
+ data |= 0x40;
+ outstream.WriteByte(data);
+ }
+
+ compressedLength += 2;
+ }
+ }
+ #endregion
+
+ #region write the data
+
+ // the codewords are stored in blocks of 32 bits
+ uint datablock = 0;
+ byte bitsLeftToWrite = 32;
+
+ for (int i = 0; i < inLength; i++)
+ {
+ byte data = inputData[i];
+ HuffTreeNode node = leaves[data];
+ // the depth of the node is the length of the codeword required to encode the byte
+ int depth = node.Depth;
+ bool[] path = new bool[depth];
+ for (int d = 0; d < depth; d++)
+ {
+ path[depth - d - 1] = node.IsChild1;
+ node = node.Parent;
+ }
+ for (int d = 0; d < depth; d++)
+ {
+ if (bitsLeftToWrite == 0)
+ {
+ outstream.Write(IOUtils.FromNDSu32(datablock), 0, 4);
+ compressedLength += 4;
+ datablock = 0;
+ bitsLeftToWrite = 32;
+ }
+ bitsLeftToWrite--;
+ if (path[d])
+ datablock |= (uint)(1 << bitsLeftToWrite);
+ // no need to OR the buffer with 0 if it is child0
+ }
+ }
+
+ // write the partly filled data block as well
+ if (bitsLeftToWrite != 32)
+ {
+ outstream.Write(IOUtils.FromNDSu32(datablock), 0, 4);
+ compressedLength += 4;
+ }
+
+ #endregion
+
+ return compressedLength;
+ }
+ #endregion
+
        #region Utility Method: Insert(node, HuffTreeNode[], maxOffset)
        /// <summary>
        /// Inserts the given node into the given array, in such a location that
        /// the offset to both of its children is at most the given maximum, and as large as possible.
        /// In order to do this, the contents of the array may be shifted to the right.
        /// When this insertion completes the parent node (both children placed), the
        /// parent is inserted recursively.
        /// </summary>
        /// <param name="node">The node to insert.</param>
        /// <param name="array">The array to insert the node in.</param>
        /// <param name="maxOffset">The maximum offset between parent and children.</param>
        private void Insert(HuffTreeNode node, HuffTreeNode[] array, int maxOffset)
        {
            // if the node has two data-children, insert it as far to the end as possible.
            // (data children are serialized inline with the node, so they impose no offset constraint.)
            if (node.Child0.IsData && node.Child1.IsData)
            {
                for (int i = array.Length - 1; i >= 0; i--)
                {
                    if (array[i] == null)
                    {
                        array[i] = node;
                        node.index = i;
                        break;
                    }
                }
            }
            else
            {
                // if the node is not data, insert it as far left as possible.
                // we know that both children are already present.
                // the lowest index from which both children are still within maxOffset:
                int offset = Math.Max(node.Child0.index - maxOffset, node.Child1.index - maxOffset);
                offset = Math.Max(0, offset);
                if (offset >= node.Child0.index || offset >= node.Child1.index)
                {
                    // it may be that the childen are too far apart, with lots of empty entries in-between.
                    // shift the bottom child right until the node fits in its left-most place for the top child.
                    // (there should be more than enough room in the array)
                    while (offset >= Math.Min(node.Child0.index, node.Child1.index))
                        ShiftRight(array, Math.Min(node.Child0.index, node.Child1.index), maxOffset);
                    while (array[offset] != null)
                        ShiftRight(array, offset, maxOffset);
                    array[offset] = node;
                    node.index = offset;
                }
                else
                {
                    // otherwise, take the first free slot between 'offset' and both children.
                    for (int i = offset; i < node.Child0.index && i < node.Child1.index; i++)
                    {
                        if (array[i] == null)
                        {
                            array[i] = node;
                            node.index = i;
                            break;
                        }
                    }
                }
            }

            // every node must have found a slot; the array is sized to guarantee this.
            if (node.index < 0)
                throw new Exception("Node could not be inserted!");

            // if the insertion of this node means that the parent has both children inserted, insert the parent.
            if (node.Parent != null)
            {
                if ((node.Parent.Child0.index >= 0 || node.Parent.Child0.IsData)
                    && (node.Parent.Child1.index >= 0 || node.Parent.Child1.IsData))
                    Insert(node.Parent, array, maxOffset);
            }
        }
        #endregion
+
        #region Utility Method: ShiftRight(HuffTreeNode[], index, maxOffset)
        /// <summary>
        /// Shifts the node at the given index one to the right.
        /// If the distance between parent and child becomes too large due to this shift, the parent is shifted as well.
        /// </summary>
        /// <param name="array">The array to shift the node in.</param>
        /// <param name="idx">The index of the node to shift.</param>
        /// <param name="maxOffset">The maximum distance between parent and children.</param>
        private void ShiftRight(HuffTreeNode[] array, int idx, int maxOffset)
        {
            HuffTreeNode node = array[idx];
            // first make room: if the next slot is occupied, shift that node right as well.
            if (array[idx + 1] != null)
                ShiftRight(array, idx + 1, maxOffset);
            // moving this node right may put it out of the parent's maxOffset range; if so,
            // move the parent along.
            // NOTE(review): this assumes node.Parent is non-null and already placed
            // (Parent.index > 0) whenever the second condition can hold -- confirm for the
            // root/unplaced-parent cases.
            if (node.Parent.index > 0 && node.index - maxOffset + 1 > node.Parent.index)
                ShiftRight(array, node.Parent.index, maxOffset);
            if (node != array[idx])
                return; // already shifted indirectly by one of the recursive calls above.
            // perform the actual one-slot shift.
            array[idx + 1] = array[idx];
            array[idx] = null;
            node.index++;
        }
        #endregion
+ }
+
+ ///
+ /// Composite compression format representing both Huffman compression schemes.
+ ///
+ public class HuffmanAny : CompositeFormat
+ {
+ ///
+ /// Creates a new instance of the general Huffman compression format.
+ ///
+ public HuffmanAny()
+ : base(new Huffman4(), new Huffman8()) { }
+
+ ///
+ /// Gets a short string identifying this compression format.
+ ///
+ public override string ShortFormatString
+ {
+ get { return "Huffman"; }
+ }
+
+ ///
+ /// Gets a short description of this compression format.
+ ///
+ public override string Description
+ {
+ get { return "Either the Huffman-4 or Huffman-8 format."; }
+ }
+
+ ///
+ /// Gets if this format supports compression. Always returns true.
+ ///
+ public override bool SupportsCompression
+ {
+ get { return true; }
+ }
+
+ ///
+ /// Gets the value that must be given on the command line in order to compress using this format.
+ ///
+ public override string CompressionFlag
+ {
+ get { return "huff"; }
+ }
+ }
+}
diff --git a/CSharp/DSDecmp/Formats/Nitro/LZ10.cs b/CSharp/DSDecmp/Formats/Nitro/LZ10.cs
new file mode 100644
index 0000000..6deda71
--- /dev/null
+++ b/CSharp/DSDecmp/Formats/Nitro/LZ10.cs
@@ -0,0 +1,469 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+namespace DSDecmp.Formats.Nitro
+{
+ ///
+ /// Compressor and decompressor for the LZ-0x10 format used in many of the games for the
+ /// newer Nintendo consoles and handhelds.
+ ///
+ public sealed class LZ10 : NitroCFormat
+ {
+        /// <summary>
+        /// Gets a short string identifying this compression format.
+        /// </summary>
+        public override string ShortFormatString
+        {
+            get { return "LZ-10"; }
+        }
+
+        /// <summary>
+        /// Gets a short description of this compression format (used in the program usage).
+        /// </summary>
+        public override string Description
+        {
+            get { return "Common LZ-type compression used in many post-GBC Nintendo games."; }
+        }
+
+        /// <summary>
+        /// Gets the value that must be given on the command line in order to compress using this format.
+        /// </summary>
+        public override string CompressionFlag
+        {
+            get { return "lz10"; }
+        }
+
+        /// <summary>
+        /// Gets if this format supports compressing a file.
+        /// </summary>
+        public override bool SupportsCompression
+        {
+            get { return true; }
+        }
+
+        // backing field for LookAhead; false matches the compressor found in the games.
+        private static bool lookAhead = false;
+        /// <summary>
+        /// Sets the flag that determines if 'look-ahead'/DP should be used when compressing
+        /// with the LZ-10 format. The default is false, which is what is used in the original
+        /// implementation.
+        /// </summary>
+        public static bool LookAhead
+        {
+            set { lookAhead = value; }
+        }
+
+        /// <summary>
+        /// Creates a new instance of the LZ-10 compression format.
+        /// </summary>
+        public LZ10() : base(0x10) { } // 0x10 is the magic byte of LZ-10 streams.
+
+        /// <summary>
+        /// Checks if the given arguments have the '-opt' option, which makes this format
+        /// compress using (near-)optimal compression instead of the original compression algorithm.
+        /// </summary>
+        /// <param name="args">The command line arguments to parse.</param>
+        /// <returns>The number of arguments consumed: 1 if '-opt' was present, 0 otherwise.</returns>
+        public override int ParseCompressionOptions(string[] args)
+        {
+            // reset first, so a '-opt' from a previous invocation does not leak into this one
+            // (consistent with the LZ11 implementation of this method).
+            LookAhead = false;
+            if (args.Length > 0)
+                if (args[0] == "-opt")
+                {
+                    LookAhead = true;
+                    return 1;
+                }
+            return 0;
+        }
+
+        #region 'Original' Decompression method
+        /// <summary>
+        /// Decompress a stream that is compressed in the LZ-10 format.
+        /// </summary>
+        /// <param name="instream">The compressed stream.</param>
+        /// <param name="inLength">The length of the input stream.</param>
+        /// <param name="outstream">The output stream, where the decompressed data is written to.</param>
+        /// <returns>The size of the decompressed data.</returns>
+        /// <exception cref="InvalidDataException">When the input is not valid LZ-10 data.</exception>
+        public override long Decompress(Stream instream, long inLength,
+            Stream outstream)
+        {
+            #region format definition from GBATEK/NDSTEK
+            /*  Data header (32bit)
+                  Bit 0-3   Reserved
+                  Bit 4-7   Compressed type (must be 1 for LZ77)
+                  Bit 8-31  Size of decompressed data
+                Repeat below. Each Flag Byte followed by eight Blocks.
+                Flag data (8bit)
+                  Bit 0-7   Type Flags for next 8 Blocks, MSB first
+                Block Type 0 - Uncompressed - Copy 1 Byte from Source to Dest
+                  Bit 0-7   One data byte to be copied to dest
+                Block Type 1 - Compressed - Copy N+3 Bytes from Dest-Disp-1 to Dest
+                  Bit 0-3   Disp MSBs
+                  Bit 4-7   Number of bytes to copy (minus 3)
+                  Bit 8-15  Disp LSBs
+             */
+            #endregion
+
+            long readBytes = 0;
+
+            byte type = (byte)instream.ReadByte();
+            if (type != base.magicByte)
+                throw new InvalidDataException("The provided stream is not a valid LZ-0x10 "
+                            + "compressed stream (invalid type 0x" + type.ToString("X") + ")");
+            byte[] sizeBytes = new byte[3];
+            instream.Read(sizeBytes, 0, 3);
+            int decompressedSize = IOUtils.ToNDSu24(sizeBytes, 0);
+            readBytes += 4;
+            // a stored size of 0 means the real (32-bit) size follows in the next 4 bytes.
+            if (decompressedSize == 0)
+            {
+                sizeBytes = new byte[4];
+                instream.Read(sizeBytes, 0, 4);
+                decompressedSize = IOUtils.ToNDSs32(sizeBytes, 0);
+                readBytes += 4;
+            }
+
+            // the maximum 'DISP-1' is 0xFFF.
+            int bufferLength = 0x1000;
+            byte[] buffer = new byte[bufferLength];
+            int bufferOffset = 0;
+
+            int currentOutSize = 0;
+            int flags = 0, mask = 1;
+            while (currentOutSize < decompressedSize)
+            {
+                // (throws when requested new flags byte is not available)
+                #region Update the mask. If all flag bits have been read, get a new set.
+                // the current mask is the mask used in the previous run. So if it masks the
+                // last flag bit, get a new flags byte.
+                if (mask == 1)
+                {
+                    if (readBytes >= inLength)
+                        throw new NotEnoughDataException(currentOutSize, decompressedSize);
+                    flags = instream.ReadByte(); readBytes++;
+                    if (flags < 0)
+                        throw new StreamTooShortException();
+                    mask = 0x80;
+                }
+                else
+                {
+                    mask >>= 1;
+                }
+                #endregion
+
+                // bit = 1 <=> compressed.
+                if ((flags & mask) > 0)
+                {
+                    // (throws when < 2 bytes are available)
+                    #region Get length and displacement('disp') values from next 2 bytes
+                    // there are < 2 bytes available when the end is at most 1 byte away
+                    if (readBytes + 1 >= inLength)
+                    {
+                        // make sure the stream is at the end
+                        if (readBytes < inLength)
+                        {
+                            instream.ReadByte(); readBytes++;
+                        }
+                        throw new NotEnoughDataException(currentOutSize, decompressedSize);
+                    }
+                    int byte1 = instream.ReadByte(); readBytes++;
+                    int byte2 = instream.ReadByte(); readBytes++;
+                    if (byte2 < 0)
+                        throw new StreamTooShortException();
+
+                    // the number of bytes to copy
+                    int length = byte1 >> 4;
+                    length += 3;
+
+                    // from where the bytes should be copied (relatively)
+                    int disp = ((byte1 & 0x0F) << 8) | byte2;
+                    disp += 1;
+
+                    if (disp > currentOutSize)
+                        throw new InvalidDataException("Cannot go back more than already written. "
+                                + "DISP = 0x" + disp.ToString("X") + ", #written bytes = 0x" + currentOutSize.ToString("X")
+                                + " at 0x" + (instream.Position - 2).ToString("X"));
+                    #endregion
+
+                    // copy <length> bytes from <disp> bytes back in the ring buffer,
+                    // writing them to the output and back into the buffer as we go
+                    // (so overlapping copies repeat the most recent bytes, as intended).
+                    int bufIdx = bufferOffset + bufferLength - disp;
+                    for (int i = 0; i < length; i++)
+                    {
+                        byte next = buffer[bufIdx % bufferLength];
+                        bufIdx++;
+                        outstream.WriteByte(next);
+                        buffer[bufferOffset] = next;
+                        bufferOffset = (bufferOffset + 1) % bufferLength;
+                    }
+                    currentOutSize += length;
+                }
+                else
+                {
+                    if (readBytes >= inLength)
+                        throw new NotEnoughDataException(currentOutSize, decompressedSize);
+                    int next = instream.ReadByte(); readBytes++;
+                    if (next < 0)
+                        throw new StreamTooShortException();
+
+                    currentOutSize++;
+                    outstream.WriteByte((byte)next);
+                    buffer[bufferOffset] = (byte)next;
+                    bufferOffset = (bufferOffset + 1) % bufferLength;
+                }
+            }
+
+            // flush once, after all data has been written. (The original code flushed once
+            // per decoded block inside the loop, which is needlessly expensive and is not
+            // done by the LZ-11 decompressor either; the output bytes are identical.)
+            outstream.Flush();
+
+            if (readBytes < inLength)
+            {
+                // the input may be 4-byte aligned.
+                if ((readBytes ^ (readBytes & 3)) + 4 < inLength)
+                    throw new TooMuchInputException(readBytes, inLength);
+            }
+
+            return decompressedSize;
+        }
+        #endregion
+
+        #region Original Compress method
+        /// <summary>
+        /// Compresses the input using the 'original', unoptimized compression algorithm.
+        /// This algorithm should yield files that are the same as those found in the games.
+        /// (delegates to the optimized method if LookAhead is set)
+        /// </summary>
+        /// <param name="instream">The stream containing the data to compress.</param>
+        /// <param name="inLength">The length of the input data.</param>
+        /// <param name="outstream">The stream to write the compressed data to.</param>
+        /// <returns>The size of the compressed data.</returns>
+        public unsafe override int Compress(Stream instream, long inLength, Stream outstream)
+        {
+            // make sure the decompressed size fits in 3 bytes.
+            // There should be room for four bytes, however I'm not 100% sure if that can be used
+            // in every game, as it may not be a built-in function.
+            if (inLength > 0xFFFFFF)
+                throw new InputTooLargeException();
+
+            // use the other method if lookahead is enabled
+            if (lookAhead)
+            {
+                return CompressWithLA(instream, inLength, outstream);
+            }
+
+            // save the input data in an array to prevent having to go back and forth in a file
+            byte[] indata = new byte[inLength];
+            int numReadBytes = instream.Read(indata, 0, (int)inLength);
+            if (numReadBytes != inLength)
+                throw new StreamTooShortException();
+
+            // write the compression header first: magic byte, then the 24-bit plaintext size.
+            outstream.WriteByte(this.magicByte);
+            outstream.WriteByte((byte)(inLength & 0xFF));
+            outstream.WriteByte((byte)((inLength >> 8) & 0xFF));
+            outstream.WriteByte((byte)((inLength >> 16) & 0xFF));
+
+            int compressedLength = 4;
+
+            fixed (byte* instart = &indata[0])
+            {
+                // we do need to buffer the output, as the first byte indicates which blocks are compressed.
+                // this version does not use a look-ahead, so we do not need to buffer more than 8 blocks at a time.
+                byte[] outbuffer = new byte[8 * 2 + 1];
+                outbuffer[0] = 0;
+                int bufferlength = 1, bufferedBlocks = 0;
+                int readBytes = 0;
+                while (readBytes < inLength)
+                {
+                    #region If 8 blocks are bufferd, write them and reset the buffer
+                    // we can only buffer 8 blocks at a time.
+                    if (bufferedBlocks == 8)
+                    {
+                        outstream.Write(outbuffer, 0, bufferlength);
+                        compressedLength += bufferlength;
+                        // reset the buffer
+                        outbuffer[0] = 0;
+                        bufferlength = 1;
+                        bufferedBlocks = 0;
+                    }
+                    #endregion
+
+                    // determine if we're dealing with a compressed or raw block.
+                    // it is a compressed block when the next 3 or more bytes can be copied from
+                    // somewhere in the set of already compressed bytes.
+                    int disp;
+                    int oldLength = Math.Min(readBytes, 0x1000);
+                    // 0x12 is the maximum length of an LZ-10 compressed block (LEN - 3 fits in 4 bits).
+                    int length = LZUtil.GetOccurrenceLength(instart + readBytes, (int)Math.Min(inLength - readBytes, 0x12),
+                        instart + readBytes - oldLength, oldLength, out disp);
+
+                    // length not 3 or more? next byte is raw data
+                    if (length < 3)
+                    {
+                        outbuffer[bufferlength++] = *(instart + (readBytes++));
+                    }
+                    else
+                    {
+                        // 3 or more bytes can be copied? next (length) bytes will be compressed into 2 bytes
+                        readBytes += length;
+
+                        // mark the next block as compressed
+                        outbuffer[0] |= (byte)(1 << (7 - bufferedBlocks));
+
+                        outbuffer[bufferlength] = (byte)(((length - 3) << 4) & 0xF0);
+                        outbuffer[bufferlength] |= (byte)(((disp - 1) >> 8) & 0x0F);
+                        bufferlength++;
+                        outbuffer[bufferlength] = (byte)((disp - 1) & 0xFF);
+                        bufferlength++;
+                    }
+                    bufferedBlocks++;
+                }
+
+                // copy the remaining blocks to the output
+                if (bufferedBlocks > 0)
+                {
+                    outstream.Write(outbuffer, 0, bufferlength);
+                    compressedLength += bufferlength;
+                    /*/ make the compressed file 4-byte aligned.
+                    while ((compressedLength % 4) != 0)
+                    {
+                        outstream.WriteByte(0);
+                        compressedLength++;
+                    }/**/
+                }
+            }
+
+            return compressedLength;
+        }
+        #endregion
+
+        #region Dynamic Programming compression method
+        /// <summary>
+        /// Variation of the original compression method, making use of Dynamic Programming to 'look ahead'
+        /// and determine the optimal 'length' values for the compressed blocks. Is not 100% optimal,
+        /// as the flag-bytes are not taken into account.
+        /// </summary>
+        /// <param name="instream">The stream containing the data to compress.</param>
+        /// <param name="inLength">The length of the input data.</param>
+        /// <param name="outstream">The stream to write the compressed data to.</param>
+        /// <returns>The size of the compressed data.</returns>
+        private unsafe int CompressWithLA(Stream instream, long inLength, Stream outstream)
+        {
+            // save the input data in an array to prevent having to go back and forth in a file
+            byte[] indata = new byte[inLength];
+            int numReadBytes = instream.Read(indata, 0, (int)inLength);
+            if (numReadBytes != inLength)
+                throw new StreamTooShortException();
+
+            // write the compression header first: magic byte, then the 24-bit plaintext size.
+            outstream.WriteByte(this.magicByte);
+            outstream.WriteByte((byte)(inLength & 0xFF));
+            outstream.WriteByte((byte)((inLength >> 8) & 0xFF));
+            outstream.WriteByte((byte)((inLength >> 16) & 0xFF));
+
+            int compressedLength = 4;
+
+            fixed (byte* instart = &indata[0])
+            {
+                // we do need to buffer the output, as the first byte indicates which blocks are compressed.
+                // this version does not use a look-ahead, so we do not need to buffer more than 8 blocks at a time.
+                byte[] outbuffer = new byte[8 * 2 + 1];
+                outbuffer[0] = 0;
+                int bufferlength = 1, bufferedBlocks = 0;
+                int readBytes = 0;
+
+                // get the optimal choices for len and disp
+                int[] lengths, disps;
+                this.GetOptimalCompressionLengths(instart, indata.Length, out lengths, out disps);
+                while (readBytes < inLength)
+                {
+                    // we can only buffer 8 blocks at a time.
+                    if (bufferedBlocks == 8)
+                    {
+                        outstream.Write(outbuffer, 0, bufferlength);
+                        compressedLength += bufferlength;
+                        // reset the buffer
+                        outbuffer[0] = 0;
+                        bufferlength = 1;
+                        bufferedBlocks = 0;
+                    }
+
+                    // length 1 means: emit the byte uncompressed.
+                    if (lengths[readBytes] == 1)
+                    {
+                        outbuffer[bufferlength++] = *(instart + (readBytes++));
+                    }
+                    else
+                    {
+                        // mark the next block as compressed
+                        outbuffer[0] |= (byte)(1 << (7 - bufferedBlocks));
+
+                        // 2-byte block: high nibble = LEN - 3, low 12 bits = DISP - 1.
+                        outbuffer[bufferlength] = (byte)(((lengths[readBytes] - 3) << 4) & 0xF0);
+                        outbuffer[bufferlength] |= (byte)(((disps[readBytes] - 1) >> 8) & 0x0F);
+                        bufferlength++;
+                        outbuffer[bufferlength] = (byte)((disps[readBytes] - 1) & 0xFF);
+                        bufferlength++;
+
+                        readBytes += lengths[readBytes];
+                    }
+
+                    bufferedBlocks++;
+                }
+
+                // copy the remaining blocks to the output
+                if (bufferedBlocks > 0)
+                {
+                    outstream.Write(outbuffer, 0, bufferlength);
+                    compressedLength += bufferlength;
+                    /*/ make the compressed file 4-byte aligned.
+                    while ((compressedLength % 4) != 0)
+                    {
+                        outstream.WriteByte(0);
+                        compressedLength++;
+                    }/**/
+                }
+            }
+
+            return compressedLength;
+        }
+        #endregion
+
+        #region DP compression helper method; GetOptimalCompressionLengths
+        /// <summary>
+        /// Gets the optimal compression lengths for each start of a compressed block using Dynamic Programming.
+        /// This takes O(n^2) time.
+        /// </summary>
+        /// <param name="indata">The data to compress.</param>
+        /// <param name="inLength">The length of the data to compress.</param>
+        /// <param name="lengths">The optimal 'length' of the compressed blocks. For each byte in the input data,
+        /// this value is the optimal 'length' value. If it is 1, the block should not be compressed.</param>
+        /// <param name="disps">The 'disp' values of the compressed blocks. May be 0, in which case the
+        /// corresponding length will never be anything other than 1.</param>
+        private unsafe void GetOptimalCompressionLengths(byte* indata, int inLength, out int[] lengths, out int[] disps)
+        {
+            lengths = new int[inLength];
+            disps = new int[inLength];
+            // minLengths[i] = the minimal compressed size of the input from position i onwards.
+            int[] minLengths = new int[inLength];
+
+            for (int i = inLength - 1; i >= 0; i--)
+            {
+                // first get the compression length when the next byte is not compressed
+                minLengths[i] = int.MaxValue;
+                lengths[i] = 1;
+                if (i + 1 >= inLength)
+                    minLengths[i] = 1;
+                else
+                    minLengths[i] = 1 + minLengths[i + 1];
+                // then the optimal compressed length
+                int oldLength = Math.Min(0x1000, i);
+                // get the appropriate disp while at it. Takes at most O(n) time if oldLength is considered O(n)
+                // be sure to bound the input length with 0x12, as that's the maximum length for LZ-10 compressed blocks.
+                int maxLen = LZUtil.GetOccurrenceLength(indata + i, Math.Min(inLength - i, 0x12),
+                    indata + i - oldLength, oldLength, out disps[i]);
+                // sanity check: a match can never start before the beginning of the input.
+                // (was a bare System.Exception; use the specific exception type used elsewhere in this file)
+                if (disps[i] > i)
+                    throw new InvalidDataException("disp is too large");
+                for (int j = 3; j <= maxLen; j++)
+                {
+                    // a compressed block always costs 2 bytes in LZ-10.
+                    int newCompLen;
+                    if (i + j >= inLength)
+                        newCompLen = 2;
+                    else
+                        newCompLen = 2 + minLengths[i + j];
+                    if (newCompLen < minLengths[i])
+                    {
+                        lengths[i] = j;
+                        minLengths[i] = newCompLen;
+                    }
+                }
+            }
+
+            // we could optimize this further to also optimize it with regard to the flag-bytes, but that would require 8 times
+            // more space and time (one for each position in the block) for only a potentially tiny increase in compression ratio.
+        }
+        #endregion
+ }
+}
diff --git a/CSharp/DSDecmp/Formats/Nitro/LZ11.cs b/CSharp/DSDecmp/Formats/Nitro/LZ11.cs
new file mode 100644
index 0000000..d030c51
--- /dev/null
+++ b/CSharp/DSDecmp/Formats/Nitro/LZ11.cs
@@ -0,0 +1,596 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+
+namespace DSDecmp.Formats.Nitro
+{
+ ///
+ /// Compressor and decompressor for the LZ-0x11 format used in many of the games for the
+ /// newer Nintendo consoles and handhelds.
+ ///
+ public sealed class LZ11 : NitroCFormat
+ {
+        /// <summary>
+        /// Gets a short string identifying this compression format.
+        /// </summary>
+        public override string ShortFormatString
+        {
+            get { return "LZ-11"; }
+        }
+
+        /// <summary>
+        /// Gets a short description of this compression format (used in the program usage).
+        /// </summary>
+        public override string Description
+        {
+            get { return "Variant of the LZ-0x10 format to support longer repetitions."; }
+        }
+
+        /// <summary>
+        /// Gets the value that must be given on the command line in order to compress using this format.
+        /// </summary>
+        public override string CompressionFlag
+        {
+            get { return "lz11"; }
+        }
+
+        /// <summary>
+        /// Gets if this format supports compressing a file.
+        /// </summary>
+        public override bool SupportsCompression
+        {
+            get { return true; }
+        }
+
+        // backing field for LookAhead; false matches the compressor found in the games.
+        private static bool lookAhead = false;
+        /// <summary>
+        /// Sets the flag that determines if 'look-ahead'/DP should be used when compressing
+        /// with the LZ-11 format. The default is false, which is what is used in the original
+        /// implementation.
+        /// </summary>
+        public static bool LookAhead
+        {
+            set { lookAhead = value; }
+        }
+
+        /// <summary>
+        /// Creates a new instance of the LZ-11 compression format.
+        /// </summary>
+        public LZ11() : base(0x11) { } // 0x11 is the magic byte of LZ-11 streams.
+
+        /// <summary>
+        /// Checks if the given arguments have the '-opt' option, which makes this format
+        /// compress using (near-)optimal compression instead of the original compression algorithm.
+        /// </summary>
+        /// <param name="args">The command line arguments to parse.</param>
+        /// <returns>The number of arguments consumed: 1 if '-opt' was present, 0 otherwise.</returns>
+        public override int ParseCompressionOptions(string[] args)
+        {
+            // default to the original algorithm unless '-opt' is explicitly given.
+            LookAhead = false;
+            if (args.Length == 0 || args[0] != "-opt")
+                return 0;
+            LookAhead = true;
+            return 1;
+        }
+
+        #region Decompression method
+        /// <summary>
+        /// Decompresses the input using the LZ-11 compression scheme.
+        /// </summary>
+        /// <param name="instream">The compressed stream.</param>
+        /// <param name="inLength">The length of the input stream.</param>
+        /// <param name="outstream">The output stream, where the decompressed data is written to.</param>
+        /// <returns>The size of the decompressed data.</returns>
+        public override long Decompress(Stream instream, long inLength, Stream outstream)
+        {
+            #region Format definition in NDSTEK style
+            /*  Data header (32bit)
+                  Bit 0-3   Reserved
+                  Bit 4-7   Compressed type (must be 1 for LZ77)
+                  Bit 8-31  Size of decompressed data. if 0, the next 4 bytes are decompressed length
+                Repeat below. Each Flag Byte followed by eight Blocks.
+                Flag data (8bit)
+                  Bit 0-7   Type Flags for next 8 Blocks, MSB first
+                Block Type 0 - Uncompressed - Copy 1 Byte from Source to Dest
+                  Bit 0-7   One data byte to be copied to dest
+                Block Type 1 - Compressed - Copy LEN Bytes from Dest-Disp-1 to Dest
+                    If Reserved is 0: - Default
+                      Bit 0-3   Disp MSBs
+                      Bit 4-7   LEN - 3
+                      Bit 8-15  Disp LSBs
+                    If Reserved is 1: - Higher compression rates for files with (lots of) long repetitions
+                      Bit 4-7   Indicator
+                        If Indicator > 1:
+                            Bit 0-3    Disp MSBs
+                            Bit 4-7    LEN - 1 (same bits as Indicator)
+                            Bit 8-15   Disp LSBs
+                        If Indicator is 1: A(B CD E)(F GH)
+                            Bit 0-3     (LEN - 0x111) MSBs
+                            Bit 4-7     Indicator; unused
+                            Bit 8-15    (LEN- 0x111) 'middle'-SBs
+                            Bit 16-19   Disp MSBs
+                            Bit 20-23   (LEN - 0x111) LSBs
+                            Bit 24-31   Disp LSBs
+                        If Indicator is 0:
+                            Bit 0-3     (LEN - 0x11) MSBs
+                            Bit 4-7     Indicator; unused
+                            Bit 8-11    Disp MSBs
+                            Bit 12-15   (LEN - 0x11) LSBs
+                            Bit 16-23   Disp LSBs
+             */
+            #endregion
+
+            long readBytes = 0;
+
+            byte type = (byte)instream.ReadByte();
+            if (type != base.magicByte)
+                throw new InvalidDataException("The provided stream is not a valid LZ-0x11 "
+                            + "compressed stream (invalid type 0x" + type.ToString("X") + ")");
+            byte[] sizeBytes = new byte[3];
+            instream.Read(sizeBytes, 0, 3);
+            int decompressedSize = IOUtils.ToNDSu24(sizeBytes, 0);
+            readBytes += 4;
+            // a stored size of 0 means the real (32-bit) size follows in the next 4 bytes.
+            if (decompressedSize == 0)
+            {
+                sizeBytes = new byte[4];
+                instream.Read(sizeBytes, 0, 4);
+                decompressedSize = IOUtils.ToNDSs32(sizeBytes, 0);
+                readBytes += 4;
+            }
+
+            // the maximum 'DISP-1' is still 0xFFF.
+            int bufferLength = 0x1000;
+            byte[] buffer = new byte[bufferLength];
+            int bufferOffset = 0;
+
+            int currentOutSize = 0;
+            int flags = 0, mask = 1;
+            while (currentOutSize < decompressedSize)
+            {
+                // (throws when requested new flags byte is not available)
+                #region Update the mask. If all flag bits have been read, get a new set.
+                // the current mask is the mask used in the previous run. So if it masks the
+                // last flag bit, get a new flags byte.
+                if (mask == 1)
+                {
+                    if (readBytes >= inLength)
+                        throw new NotEnoughDataException(currentOutSize, decompressedSize);
+                    flags = instream.ReadByte(); readBytes++;
+                    if (flags < 0)
+                        throw new StreamTooShortException();
+                    mask = 0x80;
+                }
+                else
+                {
+                    mask >>= 1;
+                }
+                #endregion
+
+                // bit = 1 <=> compressed.
+                if ((flags & mask) > 0)
+                {
+                    // (throws when not enough bytes are available)
+                    #region Get length and displacement('disp') values from next 2, 3 or 4 bytes
+
+                    // read the first byte first, which also signals the size of the compressed block
+                    if (readBytes >= inLength)
+                        throw new NotEnoughDataException(currentOutSize, decompressedSize);
+                    int byte1 = instream.ReadByte(); readBytes++;
+                    if (byte1 < 0)
+                        throw new StreamTooShortException();
+
+                    int length = byte1 >> 4;
+                    int disp = -1;
+                    if (length == 0)
+                    {
+                        #region case 0; 0(B C)(D EF) + (0x11)(0x1) = (LEN)(DISP)
+
+                        // case 0:
+                        // data = AB CD EF (with A=0)
+                        // LEN = ABC + 0x11 == BC + 0x11
+                        // DISP = DEF + 1
+
+                        // we need two more bytes available
+                        if (readBytes + 1 >= inLength)
+                            throw new NotEnoughDataException(currentOutSize, decompressedSize);
+                        int byte2 = instream.ReadByte(); readBytes++;
+                        int byte3 = instream.ReadByte(); readBytes++;
+                        if (byte3 < 0)
+                            throw new StreamTooShortException();
+
+                        length = (((byte1 & 0x0F) << 4) | (byte2 >> 4)) + 0x11;
+                        disp = (((byte2 & 0x0F) << 8) | byte3) + 0x1;
+
+                        #endregion
+                    }
+                    else if (length == 1)
+                    {
+                        #region case 1: 1(B CD E)(F GH) + (0x111)(0x1) = (LEN)(DISP)
+
+                        // case 1:
+                        // data = AB CD EF GH (with A=1)
+                        // LEN = BCDE + 0x111
+                        // DISP = FGH + 1
+
+                        // we need three more bytes available
+                        if (readBytes + 2 >= inLength)
+                            throw new NotEnoughDataException(currentOutSize, decompressedSize);
+                        int byte2 = instream.ReadByte(); readBytes++;
+                        int byte3 = instream.ReadByte(); readBytes++;
+                        int byte4 = instream.ReadByte(); readBytes++;
+                        if (byte4 < 0)
+                            throw new StreamTooShortException();
+
+                        length = (((byte1 & 0x0F) << 12) | (byte2 << 4) | (byte3 >> 4)) + 0x111;
+                        disp = (((byte3 & 0x0F) << 8) | byte4) + 0x1;
+
+                        #endregion
+                    }
+                    else
+                    {
+                        #region case > 1: (A)(B CD) + (0x1)(0x1) = (LEN)(DISP)
+
+                        // case other:
+                        // data = AB CD
+                        // LEN = A + 1
+                        // DISP = BCD + 1
+
+                        // we need only one more byte available
+                        if (readBytes >= inLength)
+                            throw new NotEnoughDataException(currentOutSize, decompressedSize);
+                        int byte2 = instream.ReadByte(); readBytes++;
+                        if (byte2 < 0)
+                            throw new StreamTooShortException();
+
+                        length = ((byte1 & 0xF0) >> 4) + 0x1;
+                        disp = (((byte1 & 0x0F) << 8) | byte2) + 0x1;
+
+                        #endregion
+                    }
+
+                    if (disp > currentOutSize)
+                        throw new InvalidDataException("Cannot go back more than already written. "
+                                + "DISP = " + disp + ", #written bytes = 0x" + currentOutSize.ToString("X")
+                                + " before 0x" + instream.Position.ToString("X") + " with indicator 0x"
+                                + (byte1 >> 4).ToString("X"));
+                    #endregion
+
+                    // copy <length> bytes from <disp> bytes back in the ring buffer,
+                    // writing them to the output and back into the buffer as we go
+                    // (so overlapping copies repeat the most recent bytes, as intended).
+                    int bufIdx = bufferOffset + bufferLength - disp;
+                    for (int i = 0; i < length; i++)
+                    {
+                        byte next = buffer[bufIdx % bufferLength];
+                        bufIdx++;
+                        outstream.WriteByte(next);
+                        buffer[bufferOffset] = next;
+                        bufferOffset = (bufferOffset + 1) % bufferLength;
+                    }
+                    currentOutSize += length;
+                }
+                else
+                {
+                    if (readBytes >= inLength)
+                        throw new NotEnoughDataException(currentOutSize, decompressedSize);
+                    int next = instream.ReadByte(); readBytes++;
+                    if (next < 0)
+                        throw new StreamTooShortException();
+
+                    outstream.WriteByte((byte)next); currentOutSize++;
+                    buffer[bufferOffset] = (byte)next;
+                    bufferOffset = (bufferOffset + 1) % bufferLength;
+                }
+            }
+
+            if (readBytes < inLength)
+            {
+                // the input may be 4-byte aligned.
+                if ((readBytes ^ (readBytes & 3)) + 4 < inLength)
+                    throw new TooMuchInputException(readBytes, inLength);
+            }
+
+            return decompressedSize;
+        }
+        #endregion
+
+        #region Original compression method
+        /// <summary>
+        /// Compresses the input using the 'original', unoptimized compression algorithm.
+        /// This algorithm should yield files that are the same as those found in the games.
+        /// (delegates to the optimized method if LookAhead is set)
+        /// </summary>
+        /// <param name="instream">The stream containing the data to compress.</param>
+        /// <param name="inLength">The length of the input data.</param>
+        /// <param name="outstream">The stream to write the compressed data to.</param>
+        /// <returns>The size of the compressed data.</returns>
+        public unsafe override int Compress(Stream instream, long inLength, Stream outstream)
+        {
+            // make sure the decompressed size fits in 3 bytes.
+            // There should be room for four bytes, however I'm not 100% sure if that can be used
+            // in every game, as it may not be a built-in function.
+            if (inLength > 0xFFFFFF)
+                throw new InputTooLargeException();
+
+            // use the other method if lookahead is enabled
+            if (lookAhead)
+            {
+                return CompressWithLA(instream, inLength, outstream);
+            }
+
+            // save the input data in an array to prevent having to go back and forth in a file
+            byte[] indata = new byte[inLength];
+            int numReadBytes = instream.Read(indata, 0, (int)inLength);
+            if (numReadBytes != inLength)
+                throw new StreamTooShortException();
+
+            // write the compression header first: magic byte, then the 24-bit plaintext size.
+            outstream.WriteByte(this.magicByte);
+            outstream.WriteByte((byte)(inLength & 0xFF));
+            outstream.WriteByte((byte)((inLength >> 8) & 0xFF));
+            outstream.WriteByte((byte)((inLength >> 16) & 0xFF));
+
+            int compressedLength = 4;
+
+            fixed (byte* instart = &indata[0])
+            {
+                // we do need to buffer the output, as the first byte indicates which blocks are compressed.
+                // this version does not use a look-ahead, so we do not need to buffer more than 8 blocks at a time.
+                // (a block is at most 4 bytes long)
+                byte[] outbuffer = new byte[8 * 4 + 1];
+                outbuffer[0] = 0;
+                int bufferlength = 1, bufferedBlocks = 0;
+                int readBytes = 0;
+                while (readBytes < inLength)
+                {
+                    #region If 8 blocks are bufferd, write them and reset the buffer
+                    // we can only buffer 8 blocks at a time.
+                    if (bufferedBlocks == 8)
+                    {
+                        outstream.Write(outbuffer, 0, bufferlength);
+                        compressedLength += bufferlength;
+                        // reset the buffer
+                        outbuffer[0] = 0;
+                        bufferlength = 1;
+                        bufferedBlocks = 0;
+                    }
+                    #endregion
+
+                    // determine if we're dealing with a compressed or raw block.
+                    // it is a compressed block when the next 3 or more bytes can be copied from
+                    // somewhere in the set of already compressed bytes.
+                    int disp;
+                    int oldLength = Math.Min(readBytes, 0x1000);
+                    // 0x10110 is the maximum length of an LZ-11 compressed block.
+                    int length = LZUtil.GetOccurrenceLength(instart + readBytes, (int)Math.Min(inLength - readBytes, 0x10110),
+                        instart + readBytes - oldLength, oldLength, out disp);
+
+                    // length not 3 or more? next byte is raw data
+                    if (length < 3)
+                    {
+                        outbuffer[bufferlength++] = *(instart + (readBytes++));
+                    }
+                    else
+                    {
+                        // 3 or more bytes can be copied? next (length) bytes will be compressed into 2 bytes
+                        readBytes += length;
+
+                        // mark the next block as compressed
+                        outbuffer[0] |= (byte)(1 << (7 - bufferedBlocks));
+
+                        if (length > 0x110)
+                        {
+                            // case 1: 1(B CD E)(F GH) + (0x111)(0x1) = (LEN)(DISP); LEN is stored minus 0x111.
+                            outbuffer[bufferlength] = 0x10;
+                            outbuffer[bufferlength] |= (byte)(((length - 0x111) >> 12) & 0x0F);
+                            bufferlength++;
+                            outbuffer[bufferlength] = (byte)(((length - 0x111) >> 4) & 0xFF);
+                            bufferlength++;
+                            outbuffer[bufferlength] = (byte)(((length - 0x111) << 4) & 0xF0);
+                        }
+                        else if (length > 0x10)
+                        {
+                            // case 0; 0(B C)(D EF) + (0x11)(0x1) = (LEN)(DISP); LEN is stored minus 0x11.
+                            // (the original code subtracted 0x111 here; the masked nibbles happen to coincide,
+                            // but 0x11 is the constant the format - and the decompressor - actually use.)
+                            outbuffer[bufferlength] = 0x00;
+                            outbuffer[bufferlength] |= (byte)(((length - 0x11) >> 4) & 0x0F);
+                            bufferlength++;
+                            outbuffer[bufferlength] = (byte)(((length - 0x11) << 4) & 0xF0);
+                        }
+                        else
+                        {
+                            // case > 1: (A)(B CD) + (0x1)(0x1) = (LEN)(DISP); LEN is stored minus 1.
+                            outbuffer[bufferlength] = (byte)(((length - 1) << 4) & 0xF0);
+                        }
+                        // the last 1.5 bytes are always the disp
+                        outbuffer[bufferlength] |= (byte)(((disp - 1) >> 8) & 0x0F);
+                        bufferlength++;
+                        outbuffer[bufferlength] = (byte)((disp - 1) & 0xFF);
+                        bufferlength++;
+                    }
+                    bufferedBlocks++;
+                }
+
+                // copy the remaining blocks to the output
+                if (bufferedBlocks > 0)
+                {
+                    outstream.Write(outbuffer, 0, bufferlength);
+                    compressedLength += bufferlength;
+                    /*/ make the compressed file 4-byte aligned.
+                    while ((compressedLength % 4) != 0)
+                    {
+                        outstream.WriteByte(0);
+                        compressedLength++;
+                    }/**/
+                }
+            }
+
+            return compressedLength;
+        }
+        #endregion
+
+        #region Dynamic Programming compression method
+        /// <summary>
+        /// Variation of the original compression method, making use of Dynamic Programming to 'look ahead'
+        /// and determine the optimal 'length' values for the compressed blocks. Is not 100% optimal,
+        /// as the flag-bytes are not taken into account.
+        /// </summary>
+        /// <param name="instream">The stream containing the data to compress.</param>
+        /// <param name="inLength">The length of the input data.</param>
+        /// <param name="outstream">The stream to write the compressed data to.</param>
+        /// <returns>The size of the compressed data.</returns>
+        private unsafe int CompressWithLA(Stream instream, long inLength, Stream outstream)
+        {
+            // save the input data in an array to prevent having to go back and forth in a file
+            byte[] indata = new byte[inLength];
+            int numReadBytes = instream.Read(indata, 0, (int)inLength);
+            if (numReadBytes != inLength)
+                throw new StreamTooShortException();
+
+            // write the compression header first: magic byte, then the 24-bit plaintext size.
+            outstream.WriteByte(this.magicByte);
+            outstream.WriteByte((byte)(inLength & 0xFF));
+            outstream.WriteByte((byte)((inLength >> 8) & 0xFF));
+            outstream.WriteByte((byte)((inLength >> 16) & 0xFF));
+
+            int compressedLength = 4;
+
+            fixed (byte* instart = &indata[0])
+            {
+                // we do need to buffer the output, as the first byte indicates which blocks are compressed.
+                // this version does not use a look-ahead, so we do not need to buffer more than 8 blocks at a time.
+                // blocks are at most 4 bytes long.
+                byte[] outbuffer = new byte[8 * 4 + 1];
+                outbuffer[0] = 0;
+                int bufferlength = 1, bufferedBlocks = 0;
+                int readBytes = 0;
+
+                // get the optimal choices for len and disp
+                int[] lengths, disps;
+                this.GetOptimalCompressionLengths(instart, indata.Length, out lengths, out disps);
+                while (readBytes < inLength)
+                {
+                    // we can only buffer 8 blocks at a time.
+                    if (bufferedBlocks == 8)
+                    {
+                        outstream.Write(outbuffer, 0, bufferlength);
+                        compressedLength += bufferlength;
+                        // reset the buffer
+                        outbuffer[0] = 0;
+                        bufferlength = 1;
+                        bufferedBlocks = 0;
+                    }
+
+                    // length 1 means: emit the byte uncompressed.
+                    if (lengths[readBytes] == 1)
+                    {
+                        outbuffer[bufferlength++] = *(instart + (readBytes++));
+                    }
+                    else
+                    {
+                        // mark the next block as compressed
+                        outbuffer[0] |= (byte)(1 << (7 - bufferedBlocks));
+
+                        if (lengths[readBytes] > 0x110)
+                        {
+                            // case 1: 1(B CD E)(F GH) + (0x111)(0x1) = (LEN)(DISP); LEN is stored minus 0x111.
+                            outbuffer[bufferlength] = 0x10;
+                            outbuffer[bufferlength] |= (byte)(((lengths[readBytes] - 0x111) >> 12) & 0x0F);
+                            bufferlength++;
+                            outbuffer[bufferlength] = (byte)(((lengths[readBytes] - 0x111) >> 4) & 0xFF);
+                            bufferlength++;
+                            outbuffer[bufferlength] = (byte)(((lengths[readBytes] - 0x111) << 4) & 0xF0);
+                        }
+                        else if (lengths[readBytes] > 0x10)
+                        {
+                            // case 0; 0(B C)(D EF) + (0x11)(0x1) = (LEN)(DISP); LEN is stored minus 0x11.
+                            // (the original code subtracted 0x111 here; the masked nibbles happen to coincide,
+                            // but 0x11 is the constant the format - and the decompressor - actually use.)
+                            outbuffer[bufferlength] = 0x00;
+                            outbuffer[bufferlength] |= (byte)(((lengths[readBytes] - 0x11) >> 4) & 0x0F);
+                            bufferlength++;
+                            outbuffer[bufferlength] = (byte)(((lengths[readBytes] - 0x11) << 4) & 0xF0);
+                        }
+                        else
+                        {
+                            // case > 1: (A)(B CD) + (0x1)(0x1) = (LEN)(DISP); LEN is stored minus 1.
+                            outbuffer[bufferlength] = (byte)(((lengths[readBytes] - 1) << 4) & 0xF0);
+                        }
+                        // the last 1.5 bytes are always the disp
+                        outbuffer[bufferlength] |= (byte)(((disps[readBytes] - 1) >> 8) & 0x0F);
+                        bufferlength++;
+                        outbuffer[bufferlength] = (byte)((disps[readBytes] - 1) & 0xFF);
+                        bufferlength++;
+
+                        readBytes += lengths[readBytes];
+                    }
+
+                    bufferedBlocks++;
+                }
+
+                // copy the remaining blocks to the output
+                if (bufferedBlocks > 0)
+                {
+                    outstream.Write(outbuffer, 0, bufferlength);
+                    compressedLength += bufferlength;
+                    /*/ make the compressed file 4-byte aligned.
+                    while ((compressedLength % 4) != 0)
+                    {
+                        outstream.WriteByte(0);
+                        compressedLength++;
+                    }/**/
+                }
+            }
+
+            return compressedLength;
+        }
+        #endregion
+
+        #region DP compression helper method; GetOptimalCompressionLengths
+        /// <summary>
+        /// Gets the optimal compression lengths for each start of a compressed block using Dynamic Programming.
+        /// This takes O(n^2) time, although in practice it will often be O(n^3) since one of the constants is 0x10110
+        /// (the maximum length of a compressed block)
+        /// </summary>
+        /// <param name="indata">The data to compress.</param>
+        /// <param name="inLength">The length of the data to compress.</param>
+        /// <param name="lengths">The optimal 'length' of the compressed blocks. For each byte in the input data,
+        /// this value is the optimal 'length' value. If it is 1, the block should not be compressed.</param>
+        /// <param name="disps">The 'disp' values of the compressed blocks. May be 0, in which case the
+        /// corresponding length will never be anything other than 1.</param>
+        private unsafe void GetOptimalCompressionLengths(byte* indata, int inLength, out int[] lengths, out int[] disps)
+        {
+            lengths = new int[inLength];
+            disps = new int[inLength];
+            // minLengths[i] = the minimal compressed size of the input from position i onwards.
+            int[] minLengths = new int[inLength];
+
+            for (int i = inLength - 1; i >= 0; i--)
+            {
+                // first get the compression length when the next byte is not compressed
+                minLengths[i] = int.MaxValue;
+                lengths[i] = 1;
+                if (i + 1 >= inLength)
+                    minLengths[i] = 1;
+                else
+                    minLengths[i] = 1 + minLengths[i + 1];
+                // then the optimal compressed length
+                int oldLength = Math.Min(0x1000, i);
+                // get the appropriate disp while at it. Takes at most O(n) time if oldLength is considered O(n) and 0x10110 constant.
+                // however since a lot of files will not be larger than 0x10110, this will often take ~O(n^2) time.
+                // be sure to bound the input length with 0x10110, as that's the maximum length for LZ-11 compressed blocks.
+                int maxLen = LZUtil.GetOccurrenceLength(indata + i, Math.Min(inLength - i, 0x10110),
+                    indata + i - oldLength, oldLength, out disps[i]);
+                // sanity check: a match can never start before the beginning of the input.
+                // (was a bare System.Exception; use the specific exception type used elsewhere in this file)
+                if (disps[i] > i)
+                    throw new InvalidDataException("disp is too large");
+                for (int j = 3; j <= maxLen; j++)
+                {
+                    // the cost of a compressed block depends on its length: 2, 3 or 4 bytes.
+                    int blocklen;
+                    if (j > 0x110)
+                        blocklen = 4;
+                    else if (j > 0x10)
+                        blocklen = 3;
+                    else
+                        blocklen = 2;
+                    int newCompLen;
+                    if (i + j >= inLength)
+                        newCompLen = blocklen;
+                    else
+                        newCompLen = blocklen + minLengths[i + j];
+                    if (newCompLen < minLengths[i])
+                    {
+                        lengths[i] = j;
+                        minLengths[i] = newCompLen;
+                    }
+                }
+            }
+
+            // we could optimize this further to also optimize it with regard to the flag-bytes, but that would require 8 times
+            // more space and time (one for each position in the block) for only a potentially tiny increase in compression ratio.
+        }
+        #endregion
+ }
+}
diff --git a/CSharp/DSDecmp/Formats/Nitro/NitroCFormat.cs b/CSharp/DSDecmp/Formats/Nitro/NitroCFormat.cs
new file mode 100644
index 0000000..9728cf1
--- /dev/null
+++ b/CSharp/DSDecmp/Formats/Nitro/NitroCFormat.cs
@@ -0,0 +1,89 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace DSDecmp.Formats.Nitro
+{
+ /// <summary>
+ /// Base class for Nitro-based decompressors. Uses the 1-byte magic and 3-byte decompression
+ /// size format.
+ /// </summary>
+ public abstract class NitroCFormat : CompressionFormat
+ {
+ /// <summary>
+ /// If true, Nitro Decompressors will not decompress files that have a decompressed
+ /// size (plaintext size) larger than MaxPlaintextSize.
+ /// </summary>
+ public static bool SkipLargePlaintexts = true;
+ /// <summary>
+ /// The maximum allowed size of the decompressed file (plaintext size) allowed for Nitro
+ /// Decompressors. Only used when SkipLargePlaintexts = true.
+ /// If the expected plaintext size is larger that this, the 'Supports' method will partially
+ /// decompress the data to check if the file is OK.
+ /// </summary>
+ public static int MaxPlaintextSize = 0x180000;
+
+ /// <summary>
+ /// The first byte of every file compressed with the format for this particular
+ /// Nitro Decompressor instance.
+ /// </summary>
+ protected byte magicByte;
+
+ /// <summary>
+ /// Creates a new instance of the Nitro Compression Format base class.
+ /// </summary>
+ /// <param name="magicByte">The expected first byte of the file for this format.</param>
+ protected NitroCFormat(byte magicByte)
+ {
+ this.magicByte = magicByte;
+ }
+
+ /// <summary>
+ /// Checks if the first four (or eight) bytes match the format used in nitro compression formats.
+ /// </summary>
+ public override bool Supports(System.IO.Stream stream, long inLength)
+ {
+ long startPosition = stream.Position;
+ try
+ {
+ int firstByte = stream.ReadByte();
+ if (firstByte != this.magicByte)
+ return false;
+ // no need to read the size info as well if it's used anyway.
+ if (!SkipLargePlaintexts)
+ return true;
+ // the plaintext size is stored in the 3 bytes after the magic byte.
+ byte[] sizeBytes = new byte[3];
+ stream.Read(sizeBytes, 0, 3);
+ int outSize = IOUtils.ToNDSu24(sizeBytes, 0);
+ if (outSize == 0)
+ {
+ // a stored size of 0 indicates the real size follows as a 32-bit value.
+ sizeBytes = new byte[4];
+ stream.Read(sizeBytes, 0, 4);
+ outSize = (int)IOUtils.ToNDSu32(sizeBytes, 0);
+ }
+ if (outSize <= MaxPlaintextSize)
+ return true;
+
+ try
+ {
+ stream.Position = startPosition;
+ this.Decompress(stream, Math.Min(Math.Min(inLength, 0x80000), MaxPlaintextSize), new System.IO.MemoryStream());
+ // we expect a NotEnoughDataException, since we're giving the decompressor only part of the file.
+ return false;
+ }
+ catch (NotEnoughDataException)
+ {
+ return true;
+ }
+ catch (Exception)
+ {
+ return false;
+ }
+ }
+ finally
+ {
+ stream.Position = startPosition;
+ }
+ }
+ }
+}
diff --git a/CSharp/DSDecmp/Formats/Nitro/NullCompression.cs b/CSharp/DSDecmp/Formats/Nitro/NullCompression.cs
new file mode 100644
index 0000000..5efa929
--- /dev/null
+++ b/CSharp/DSDecmp/Formats/Nitro/NullCompression.cs
@@ -0,0 +1,158 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+
+namespace DSDecmp.Formats.Nitro
+{
+ /// <summary>
+ /// 'Compression' format without any compression whatsoever.
+ /// Compression using this format will only prepend 0x00 plus the original file size to the file.
+ /// </summary>
+ public class NullCompression : NitroCFormat
+ {
+ /// <summary>
+ /// Gets a short string identifying this compression format.
+ /// </summary>
+ public override string ShortFormatString
+ {
+ get { return "NULL"; }
+ }
+
+ /// <summary>
+ /// Gets a short description of this compression format (used in the program usage).
+ /// </summary>
+ public override string Description
+ {
+ get { return "NULL-'compression' format. Prefixes file with 0x00 and filesize."; }
+ }
+
+ /// <summary>
+ /// Gets if this format supports compressing a file.
+ /// </summary>
+ public override bool SupportsCompression
+ {
+ get { return true; }
+ }
+
+ /// <summary>
+ /// Gets the value that must be given on the command line in order to compress using this format.
+ /// </summary>
+ public override string CompressionFlag
+ {
+ get { return "null"; }
+ }
+
+ /// <summary>
+ /// Creates a new instance of the NULL-compression format.
+ /// </summary>
+ public NullCompression()
+ : base(0) { }
+
+ /// <summary>
+ /// Checks if the given stream is (or could be) 'compressed' using the NULL compression format.
+ /// </summary>
+ public override bool Supports(System.IO.Stream stream, long inLength)
+ {
+ long startPosition = stream.Position;
+ try
+ {
+ int firstByte = stream.ReadByte();
+ if (firstByte != 0)
+ return false;
+ // read the 24-bit plaintext size; a stored size of 0 means a 32-bit size follows.
+ byte[] sizeBytes = new byte[3];
+ stream.Read(sizeBytes, 0, 3);
+ int outSize = IOUtils.ToNDSu24(sizeBytes, 0);
+ int headerSize = 4;
+ if (outSize == 0)
+ {
+ sizeBytes = new byte[4];
+ stream.Read(sizeBytes, 0, 4);
+ outSize = (int)IOUtils.ToNDSu32(sizeBytes, 0);
+ headerSize = 8;
+ }
+ // a NULL-compressed file stores exactly the plaintext after the header.
+ return outSize == inLength - headerSize;
+ }
+ finally
+ {
+ stream.Position = startPosition;
+ }
+ }
+
+ /// <summary>
+ /// 'Decompresses' the given input stream using the NULL format.
+ /// </summary>
+ /// <param name="instream">The stream with the 'compressed' input data.</param>
+ /// <param name="inLength">The length of the input data.</param>
+ /// <param name="outstream">The stream to write the plaintext to.</param>
+ /// <returns>The number of plaintext bytes written to the output stream.</returns>
+ public override long Decompress(System.IO.Stream instream, long inLength, System.IO.Stream outstream)
+ {
+ long readBytes = 0;
+
+ byte type = (byte)instream.ReadByte();
+ if (type != base.magicByte)
+ throw new InvalidDataException("The provided stream is not a valid Null "
+ + "compressed stream (invalid type 0x" + type.ToString("X") + ")");
+ byte[] sizeBytes = new byte[3];
+ instream.Read(sizeBytes, 0, 3);
+ int decompressedSize = IOUtils.ToNDSu24(sizeBytes, 0);
+ readBytes += 4;
+ if (decompressedSize == 0)
+ {
+ // a stored size of 0 means the real (32-bit) size follows.
+ sizeBytes = new byte[4];
+ instream.Read(sizeBytes, 0, 4);
+ decompressedSize = IOUtils.ToNDSs32(sizeBytes, 0);
+ readBytes += 4;
+ }
+
+ // the remainder of the file is the plaintext; copy it verbatim.
+ byte[] data = new byte[decompressedSize];
+ int readLength = instream.Read(data, 0, decompressedSize);
+ outstream.Write(data, 0, readLength);
+ if (readLength < decompressedSize)
+ throw new NotEnoughDataException(readLength, decompressedSize);
+
+ return readLength;
+ }
+
+ /// <summary>
+ /// 'Compresses' the given input stream using the NULL format.
+ /// </summary>
+ /// <param name="instream">The stream with the plaintext input data.</param>
+ /// <param name="inLength">The length of the input data.</param>
+ /// <param name="outstream">The stream to write the 'compressed' data to.</param>
+ /// <returns>The total output size, header included (capped at int.MaxValue).</returns>
+ public override int Compress(System.IO.Stream instream, long inLength, System.IO.Stream outstream)
+ {
+ if (inLength > 0xFFFFFFFF)
+ throw new InputTooLargeException();
+
+ long outSize = 4;
+
+ // write the magic byte (0) and the plaintext size: 24-bit when it fits,
+ // otherwise three zero bytes followed by the 32-bit size.
+ outstream.WriteByte(0);
+ if (inLength <= 0xFFFFFF)
+ {
+ outstream.WriteByte((byte)(inLength & 0xFF));
+ outstream.WriteByte((byte)((inLength >> 8) & 0xFF));
+ outstream.WriteByte((byte)((inLength >> 16) & 0xFF));
+ }
+ else
+ {
+ outstream.WriteByte(0);
+ outstream.WriteByte(0);
+ outstream.WriteByte(0);
+ outstream.WriteByte((byte)(inLength & 0xFF));
+ outstream.WriteByte((byte)((inLength >> 8) & 0xFF));
+ outstream.WriteByte((byte)((inLength >> 16) & 0xFF));
+ outstream.WriteByte((byte)((inLength >> 24) & 0xFF));
+ outSize = 8;
+ }
+
+ // copy the input to the output unmodified.
+ byte[] buffer = new byte[Math.Min(int.MaxValue, inLength)];
+ long remaining = inLength;
+ while (remaining > 0)
+ {
+ int readLength = instream.Read(buffer, 0, (int)Math.Min(buffer.Length, remaining));
+ if (readLength == 0)
+ throw new StreamTooShortException();
+ remaining -= readLength;
+ outstream.Write(buffer, 0, readLength);
+ outSize += readLength;
+ }
+
+ return (int)Math.Min(int.MaxValue, outSize);
+ }
+ }
+}
diff --git a/CSharp/DSDecmp/Formats/Nitro/RLE.cs b/CSharp/DSDecmp/Formats/Nitro/RLE.cs
new file mode 100644
index 0000000..b71d17a
--- /dev/null
+++ b/CSharp/DSDecmp/Formats/Nitro/RLE.cs
@@ -0,0 +1,302 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+
+namespace DSDecmp.Formats.Nitro
+{
+ /// <summary>
+ /// Compressor and decompressor for the RLE format used in several of the games for the
+ /// newer Nintendo consoles and handhelds.
+ /// </summary>
+ public sealed class RLE : NitroCFormat
+ {
+ /// <summary>
+ /// Gets a short string identifying this compression format.
+ /// </summary>
+ public override string ShortFormatString
+ {
+ get { return "RLE"; }
+ }
+
+ /// <summary>
+ /// Gets a short description of this compression format (used in the program usage).
+ /// </summary>
+ public override string Description
+ {
+ get { return "Run-Length Encoding used in some modern Nintendo games."; }
+ }
+
+ /// <summary>
+ /// Gets the value that must be given on the command line in order to compress using this format.
+ /// </summary>
+ public override string CompressionFlag
+ {
+ get { return "rle"; }
+ }
+
+ /// <summary>
+ /// Gets if this format supports compressing a file.
+ /// </summary>
+ public override bool SupportsCompression
+ {
+ get { return true; }
+ }
+
+ /// <summary>
+ /// Creates a new instance of the RLE compression format (magic byte 0x30).
+ /// </summary>
+ public RLE() : base(0x30) { }
+
+ #region Method: Decompress
+ /// <summary>
+ /// Decompresses the input using the RLE compression scheme.
+ /// </summary>
+ /// <param name="instream">The stream with the RLE-compressed input data.</param>
+ /// <param name="inLength">The length of the compressed input.</param>
+ /// <param name="outstream">The stream to write the decompressed data to.</param>
+ /// <returns>The size of the decompressed data.</returns>
+ public override long Decompress(Stream instream, long inLength, Stream outstream)
+ {
+ /*
+ Data header (32bit)
+ Bit 0-3 Reserved
+ Bit 4-7 Compressed type (must be 3 for run-length)
+ Bit 8-31 Size of decompressed data
+ Repeat below. Each Flag Byte followed by one or more Data Bytes.
+ Flag data (8bit)
+ Bit 0-6 Expanded Data Length (uncompressed N-1, compressed N-3)
+ Bit 7 Flag (0=uncompressed, 1=compressed)
+ Data Byte(s) - N uncompressed bytes, or 1 byte repeated N times
+ */
+
+ long readBytes = 0;
+
+ byte type = (byte)instream.ReadByte();
+ if (type != base.magicByte)
+ throw new InvalidDataException("The provided stream is not a valid RLE "
+ + "compressed stream (invalid type 0x" + type.ToString("X") + ")");
+ byte[] sizeBytes = new byte[3];
+ instream.Read(sizeBytes, 0, 3);
+ int decompressedSize = IOUtils.ToNDSu24(sizeBytes, 0);
+ readBytes += 4;
+ if (decompressedSize == 0)
+ {
+ // a stored size of 0 means the real (32-bit) size follows.
+ sizeBytes = new byte[4];
+ instream.Read(sizeBytes, 0, 4);
+ decompressedSize = IOUtils.ToNDSs32(sizeBytes, 0);
+ readBytes += 4;
+ }
+
+ int currentOutSize = 0;
+ while (currentOutSize < decompressedSize)
+ {
+ #region (try to) get the flag byte with the length data and compressed flag
+
+ if (readBytes >= inLength)
+ throw new NotEnoughDataException(currentOutSize, decompressedSize);
+ int flag = instream.ReadByte(); readBytes++;
+ if (flag < 0)
+ throw new StreamTooShortException();
+
+ bool compressed = (flag & 0x80) > 0;
+ int length = flag & 0x7F;
+
+ if (compressed)
+ length += 3;
+ else
+ length += 1;
+
+ #endregion
+
+ if (compressed)
+ {
+ #region compressed: write the next byte (length) times.
+
+ if (readBytes >= inLength)
+ throw new NotEnoughDataException(currentOutSize, decompressedSize);
+ int data = instream.ReadByte(); readBytes++;
+ if (data < 0)
+ throw new StreamTooShortException();
+
+ if (currentOutSize + length > decompressedSize)
+ throw new InvalidDataException("The given stream is not a valid RLE stream; the "
+ + "output length does not match the provided plaintext length.");
+ byte bdata = (byte)data;
+ for (int i = 0; i < length; i++)
+ {
+ // Stream.Write(byte[], offset, len) may also work, but only if it is a circular buffer
+ outstream.WriteByte(bdata);
+ currentOutSize++;
+ }
+
+ #endregion
+ }
+ else
+ {
+ #region uncompressed: copy the next (length) bytes.
+
+ int tryReadLength = length;
+ // limit the amount of bytes read by the indicated number of bytes available
+ if (readBytes + length > inLength)
+ tryReadLength = (int)(inLength - readBytes);
+
+ byte[] data = new byte[length];
+ int readLength = instream.Read(data, 0, tryReadLength);
+ readBytes += readLength;
+ outstream.Write(data, 0, readLength);
+ currentOutSize += readLength;
+
+ // if the attempted number of bytes read is less than the desired number, the given input
+ // length is too small (or there is not enough data in the stream)
+ if (tryReadLength < length)
+ throw new NotEnoughDataException(currentOutSize, decompressedSize);
+ // if the actual number of read bytes is even less, it means that the end of the stream has
+ // been reached, thus the given input length is larger than the actual length of the input
+ if (readLength < length)
+ throw new StreamTooShortException();
+
+ #endregion
+ }
+ }
+
+ if (readBytes < inLength)
+ {
+ // the input may be 4-byte aligned.
+ if ((readBytes ^ (readBytes & 3)) + 4 < inLength)
+ throw new TooMuchInputException(readBytes, inLength);
+ }
+
+ return decompressedSize;
+ }
+ #endregion Decompress
+
+ #region Method: Compress
+ /// <summary>
+ /// Compresses the input using the RLE compression scheme.
+ /// </summary>
+ /// <param name="instream">The stream with the plaintext input data.</param>
+ /// <param name="inLength">The length of the input data; may not exceed 0xFFFFFF.</param>
+ /// <param name="outstream">The stream to write the compressed data to.</param>
+ /// <returns>The total size of the compressed output, header included.</returns>
+ public override int Compress(Stream instream, long inLength, Stream outstream)
+ {
+ if (inLength > 0xFFFFFF)
+ throw new InputTooLargeException();
+
+ List<byte> compressedData = new List<byte>();
+
+ // at most 0x7F+3=130 bytes are compressed into a single block.
+ // (and at most 0x7F+1=128 in an uncompressed block, however we need to read 2
+ // more, since the last byte may be part of a repetition).
+ byte[] dataBlock = new byte[130];
+ // the length of the valid content in the current data block
+ int currentBlockLength = 0;
+
+ int readLength = 0;
+ int nextByte;
+ int repCount = 1;
+ while (readLength < inLength)
+ {
+ bool foundRepetition = false;
+
+ // fill the block buffer until it is full, the input runs out, or a
+ // run of at least 3 equal bytes has been found.
+ while (currentBlockLength < dataBlock.Length && readLength < inLength)
+ {
+ nextByte = instream.ReadByte();
+ if (nextByte < 0)
+ throw new StreamTooShortException();
+ readLength++;
+
+ dataBlock[currentBlockLength++] = (byte)nextByte;
+ if (currentBlockLength > 1)
+ {
+ if (nextByte == dataBlock[currentBlockLength - 2])
+ repCount++;
+ else
+ repCount = 1;
+ }
+
+ foundRepetition = repCount > 2;
+ if (foundRepetition)
+ break;
+ }
+
+ int numUncompToCopy = 0;
+ if (foundRepetition)
+ {
+ // if a repetition was found, copy block size - 3 bytes as compressed data
+ numUncompToCopy = currentBlockLength - 3;
+ }
+ else
+ {
+ // if no repetition was found, copy min(block size, max block size - 2) bytes as uncompressed data.
+ numUncompToCopy = Math.Min(currentBlockLength, dataBlock.Length - 2);
+ }
+
+ #region insert uncompressed block
+ if (numUncompToCopy > 0)
+ {
+ byte flag = (byte)(numUncompToCopy - 1);
+ compressedData.Add(flag);
+ for (int i = 0; i < numUncompToCopy; i++)
+ compressedData.Add(dataBlock[i]);
+ // shift some possibly remaining bytes to the start
+ for (int i = numUncompToCopy; i < currentBlockLength; i++)
+ dataBlock[i - numUncompToCopy] = dataBlock[i];
+ currentBlockLength -= numUncompToCopy;
+ }
+ #endregion
+
+ if (foundRepetition)
+ {
+ // if a repetition was found, continue until the first different byte
+ // (or until the buffer is full)
+ while (currentBlockLength < dataBlock.Length && readLength < inLength)
+ {
+ nextByte = instream.ReadByte();
+ if (nextByte < 0)
+ throw new StreamTooShortException();
+ readLength++;
+
+ dataBlock[currentBlockLength++] = (byte)nextByte;
+
+ if (nextByte != dataBlock[0])
+ break;
+ else
+ repCount++;
+ }
+
+ // the next repCount bytes are the same.
+ #region insert compressed block
+ byte flag = (byte)(0x80 | (repCount - 3));
+ compressedData.Add(flag);
+ compressedData.Add(dataBlock[0]);
+ // make sure to shift the possible extra byte to the start
+ if (repCount != currentBlockLength)
+ dataBlock[0] = dataBlock[currentBlockLength - 1];
+ currentBlockLength -= repCount;
+ #endregion
+ }
+ }
+
+ // write any remaining bytes as uncompressed
+ if (currentBlockLength > 0)
+ {
+ byte flag = (byte)(currentBlockLength - 1);
+ compressedData.Add(flag);
+ for (int i = 0; i < currentBlockLength; i++)
+ compressedData.Add(dataBlock[i]);
+ currentBlockLength = 0;
+ }
+
+ // write the RLE marker and the decompressed size
+ outstream.WriteByte(base.magicByte);
+ int compLen = compressedData.Count;
+ outstream.WriteByte((byte)(inLength & 0xFF));
+ outstream.WriteByte((byte)((inLength >> 8) & 0xFF));
+ outstream.WriteByte((byte)((inLength >> 16) & 0xFF));
+
+ // write the compressed data
+ outstream.Write(compressedData.ToArray(), 0, compLen);
+
+ // the total compressed stream length is the compressed data length + the 4-byte header
+ return compLen + 4;
+ }
+ #endregion Compress
+ }
+}
diff --git a/CSharp/DSDecmp/NewProgram.cs b/CSharp/DSDecmp/NewProgram.cs
new file mode 100644
index 0000000..9fd8b1b
--- /dev/null
+++ b/CSharp/DSDecmp/NewProgram.cs
@@ -0,0 +1,561 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+using DSDecmp.Formats.Nitro;
+using DSDecmp.Formats;
+
+namespace DSDecmp
+{
+
+ public static class NewProgram
+ {
+
+
+ /// <summary>
+ /// The formats allowed when compressing a file.
+ /// </summary>
+ public enum Formats
+ {
+ LZOVL, // keep this as the first one, as only the end of a file may be LZ-ovl-compressed (and overlay files are often double-compressed) (it needs to be attempted first when decompressing)
+ LZ10,
+ LZ11,
+ HUFF4,
+ HUFF8,
+ RLE,
+ HUFF, // meta-format: the Huffman variant (huff4/huff8) that compresses best
+ NDS, // meta-format: the best NDS-native format (excluding lzovl)
+ GBA // meta-format: the best GBA-supported format
+ }
+
+ /// <summary>
+ /// Entry point of the (old) command-line handler: parses the arguments and
+ /// dispatches to either Compress or Decompress. See PrintUsage for the syntax.
+ /// </summary>
+ /// <param name="args">The command-line arguments.</param>
+ public static void MainNewOld(string[] args)
+ {
+ if (args.Length == 0)
+ {
+ PrintUsage();
+#if DEBUG
+ Console.ReadLine();
+#endif
+ return;
+ }
+
+ int argIndex = 0;
+ bool compress = false;
+ Formats compressFormat = Formats.NDS;
+
+ #region check for the -c option and its parameter(s)
+ if (args[argIndex].Equals("-c"))
+ {
+ argIndex++;
+ compress = true;
+
+ if (args.Length < argIndex + 2)
+ {
+ Console.WriteLine("A compression format and input file is required in order to compress.");
+ Console.WriteLine();
+ PrintUsage();
+ return;
+ }
+ // map the FORMAT argument onto the Formats enum
+ switch (args[argIndex].ToLower())
+ {
+ case "lz10": compressFormat = Formats.LZ10; break;
+ case "lz11": compressFormat = Formats.LZ11; break;
+ case "lzovl": compressFormat = Formats.LZOVL; break;
+ case "rle": compressFormat = Formats.RLE; break;
+ case "huff4": compressFormat = Formats.HUFF4; break;
+ case "huff8": compressFormat = Formats.HUFF8; break;
+ case "huff": compressFormat = Formats.HUFF; break;
+ case "gba*": compressFormat = Formats.GBA; break;
+ case "nds*": compressFormat = Formats.NDS; break;
+ default:
+ Console.WriteLine("Unknown compression format " + args[argIndex]);
+ Console.WriteLine();
+ PrintUsage();
+ return;
+ }
+ argIndex++;
+ // handle the format options (-opt enables the look-ahead/optimal LZ algorithms)
+ switch (compressFormat)
+ {
+ case Formats.LZ10:
+ case Formats.GBA:
+ if (args[argIndex].Equals("-opt"))
+ {
+ LZ10.LookAhead = true;
+ argIndex++;
+ }
+ break;
+ case Formats.LZ11:
+ if (args[argIndex].Equals("-opt"))
+ {
+ LZ11.LookAhead = true;
+ argIndex++;
+ }
+ break;
+ case Formats.LZOVL:
+ if (args[argIndex].Equals("-opt"))
+ {
+ LZOvl.LookAhead = true;
+ argIndex++;
+ }
+ break;
+ case Formats.NDS:
+ if (args[argIndex].Equals("-opt"))
+ {
+ LZ10.LookAhead = true;
+ LZ11.LookAhead = true;
+ LZOvl.LookAhead = true;
+ argIndex++;
+ }
+ break;
+ }
+ }
+ #endregion
+
+ if (args.Length < argIndex + 1)
+ throw new ArgumentException("No input file given.");
+
+ bool guessExtension = false;
+ if (args[argIndex].Equals("-ge"))
+ {
+ guessExtension = true;
+ argIndex++;
+ }
+
+ if (args.Length < argIndex + 1)
+ throw new ArgumentException("No input file given.");
+
+ string input = args[argIndex++];
+ string output = null;
+ if (args.Length > argIndex)
+ output = args[argIndex++];
+
+ if (compress)
+ Compress(input, output, compressFormat, guessExtension);
+ else
+ Decompress(input, output, guessExtension);
+
+#if DEBUG
+ Console.ReadLine();
+#endif
+ }
+
+ /// <summary>
+ /// Writes the program usage (command-line syntax, formats and options) to the console.
+ /// </summary>
+ private static void PrintUsage()
+ {
+ Console.WriteLine("DSDecmp - Decompressor for compression formats used on the NDS - by Barubary");
+ Console.WriteLine();
+ Console.WriteLine("Usage:\tDSDecmp (-c FORMAT FORMATOPT*) (-ge) input (output)");
+ Console.WriteLine();
+ Console.WriteLine("Without the -c modifier, DSDecmp will decompress the input file to the output");
+ Console.WriteLine(" file. If the output file is a directory, the output file will be placed in");
+ Console.WriteLine(" that directory with the same filename as the original file. The extension will");
+ Console.WriteLine(" be appended with a format-specific extension.");
+ Console.WriteLine("The input can also be a directory. In that case, it would be the same as");
+ Console.WriteLine(" calling DSDecmp for every non-directory in the given directory with the same");
+ Console.WriteLine(" options, with one exception; the output is by default the input folder, but");
+ Console.WriteLine(" with '_dec' appended.");
+ Console.WriteLine("If the output does not exist, it is assumed to be the same type as the input");
+ Console.WriteLine(" (file or directory).");
+ Console.WriteLine("If there is no output file given, it is assumed to be the directory of the");
+ Console.WriteLine(" input file.");
+ Console.WriteLine();
+ Console.WriteLine("With the -ge option, instead of a format-specific extension, the extension");
+ Console.WriteLine(" will be guessed from the first four bytes of the output file. Only");
+ Console.WriteLine(" non-accented letters or numbers are considered in those four bytes.");
+ Console.WriteLine();
+ Console.WriteLine("With the -c option, the input is compressed instead of decompressed. FORMAT");
+ Console.WriteLine("indicates the desired compression format, and can be one of:");
+ Console.WriteLine(" --- formats built-in in the NDS ---");
+ Console.WriteLine(" lz10 - 'default' LZ-compression format.");
+ Console.WriteLine(" lz11 - LZ-compression format better suited for files with long repetitions");
+ Console.WriteLine(" lzovl - LZ-compression used in 'overlay files'.");
+ Console.WriteLine(" rle - Run-Length Encoding 'compression'.");
+ Console.WriteLine(" huff4 - Huffman compression with 4-bit sized data blocks.");
+ Console.WriteLine(" huff8 - Huffman compression with 8-bit sized data blocks.");
+ Console.WriteLine(" --- utility 'formats' ---");
+ // typo fixes relative to the original text: 'bext' -> 'best', 'particualr' -> 'particular'
+ Console.WriteLine(" huff - The Huffman compression that gives the best compression ratio.");
+ Console.WriteLine(" nds* - The built-in compression format that gives the best compression");
+ Console.WriteLine(" ratio. Will never compress using lzovl.");
+ Console.WriteLine(" gba* - The built-in compression format that gives the best compression");
+ Console.WriteLine(" ratio, and is also supported by the GBA.");
+ Console.WriteLine();
+ Console.WriteLine("The following format options (FORMATOPT) are available:");
+ Console.WriteLine(" lz10, lz11, lzovl, gba* and nds*:");
+ Console.WriteLine(" -opt : employs a better compression algorithm to boost the compression");
+ Console.WriteLine(" ratio. Not using this option will result in using the algorithm");
+ Console.WriteLine(" originally used to compress the game files.");
+ Console.WriteLine(" Using this option for the gba* and nds* will only have effect on");
+ Console.WriteLine(" the lz10, lz11 and lzovl algorithms.");
+ Console.WriteLine();
+ Console.WriteLine("If the input is a directory when the -c option, the default output directory");
+ Console.WriteLine(" is the input directory appended with '_cmp'.");
+ Console.WriteLine();
+ Console.WriteLine("Supplying the -ge modifier together with the -c modifier, the extension of the");
+ Console.WriteLine(" compressed files will be extended with the 'FORMAT' value that always results");
+ Console.WriteLine(" in that particular format (so 'lz11', 'rle', etc).");
+ Console.WriteLine("If the -ge modifier is not present, the extension of compressed files will be");
+ Console.WriteLine(" extended with .cdat");
+ }
+
+ #region compression methods
+
+ /// <summary>
+ /// Compresses the given input file or folder to the given output location using the
+ /// given format. Folders are compressed recursively file-by-file.
+ /// </summary>
+ /// <param name="input">The file or folder to compress.</param>
+ /// <param name="output">The output file or folder; may be null or empty for the default.</param>
+ /// <param name="format">The desired (possibly meta-) compression format.</param>
+ /// <param name="guessExtension">If true, the format name is used as the output extension instead of '.cdat'.</param>
+ private static void Compress(string input, string output, Formats format, bool guessExtension)
+ {
+ if (!File.Exists(input) && !Directory.Exists(input))
+ {
+ Console.WriteLine("Cannot compress a file or directory that does not exist (" + input + ")");
+ return;
+ }
+
+ // set the default value of the output
+ if (string.IsNullOrEmpty(output))
+ {
+ if (Directory.Exists(input))
+ {
+ // default output folder for a folder input: the input folder + '_cmp'
+ string newDir = Path.GetFullPath(input) + "_cmp";
+ if (!Directory.Exists(newDir))
+ Directory.CreateDirectory(newDir);
+ foreach (string file in Directory.GetFiles(input))
+ {
+ Compress(file, newDir, format, guessExtension);
+ }
+ return;
+ }
+ else
+ {
+ if (!guessExtension)
+ output = input; // the .cdat extension is added automatically
+ else
+ output = Path.GetDirectoryName(input);
+ }
+ }
+
+ if (Directory.Exists(input))
+ {
+ if (!Directory.Exists(output))
+ Directory.CreateDirectory(output);
+ foreach (string file in Directory.GetFiles(input))
+ {
+ Compress(file, output, format, guessExtension);
+ }
+ return;
+ }
+
+ // compress the input
+ MemoryStream compressedData = new MemoryStream();
+ Formats compressedFormat;
+ int outsize = DoCompress(input, compressedData, format, out compressedFormat);
+ if (outsize < 0)
+ return;
+
+ // determine the final output file name (extension handling).
+ bool mustAppendExt = !Directory.Exists(output) && !File.Exists(output);
+ if (Directory.Exists(output))
+ {
+ output = CombinePaths(output, Path.GetFileName(input));
+ }
+ if (mustAppendExt && Path.GetExtension(output) == ".dat")
+ output = RemoveExtension(output);
+ if (guessExtension)
+ output += "." + compressedFormat.ToString().ToLower();
+ else if (mustAppendExt)
+ output += ".cdat";
+
+ using (FileStream outStream = File.Create(output))
+ {
+ compressedData.WriteTo(outStream);
+ Console.WriteLine(compressedFormat.ToString() + "-compressed " + input + " to " + output);
+ }
+ }
+
+ /// <summary>
+ /// Compresses the given file into the given output stream using the given format,
+ /// resolving meta-formats (HUFF/GBA/NDS) to the best matching concrete format.
+ /// </summary>
+ /// <param name="infile">The path of the file to compress.</param>
+ /// <param name="output">The stream the compressed data is written to.</param>
+ /// <param name="format">The desired (possibly meta-) compression format.</param>
+ /// <param name="actualFormat">The concrete format that was actually used.</param>
+ /// <returns>The compressed size, or -1 if compression failed.</returns>
+ private static int DoCompress(string infile, MemoryStream output, Formats format, out Formats actualFormat)
+ {
+ CompressionFormat fmt = null;
+ switch (format)
+ {
+ case Formats.LZ10: fmt = new LZ10(); break;
+ case Formats.LZ11: fmt = new LZ11(); break;
+ case Formats.LZOVL: fmt = new LZOvl(); break;
+ case Formats.RLE: fmt = new RLE(); break;
+ case Formats.HUFF4: fmt = new Huffman4(); break;
+ case Formats.HUFF8: fmt = new Huffman8(); break;
+ case Formats.HUFF:
+ return CompressHuff(infile, output, out actualFormat);
+ case Formats.GBA:
+ return CompressGBA(infile, output, out actualFormat);
+ case Formats.NDS:
+ return CompressNDS(infile, output, out actualFormat);
+ default:
+ throw new Exception("Unhandled compression format " + format);
+ }
+ actualFormat = format;
+
+ using (FileStream inStream = File.OpenRead(infile))
+ {
+ try
+ {
+ return fmt.Compress(inStream, inStream.Length, output);
+ }
+ catch (Exception s)
+ {
+ // any exception generated by compression is a fatal exception
+ Console.WriteLine(s.Message);
+ return -1;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Compresses the given file using whichever of the two Huffman formats yields the smallest output.
+ /// </summary>
+ private static int CompressHuff(string infile, MemoryStream output, out Formats actualFormat)
+ {
+ return CompressBest(infile, output, out actualFormat, Formats.HUFF4, Formats.HUFF8);
+ }
+
+ /// <summary>
+ /// Compresses the given file using whichever GBA-supported format (huff4, huff8, lz10, rle) yields the smallest output.
+ /// </summary>
+ private static int CompressGBA(string infile, MemoryStream output, out Formats actualFormat)
+ {
+ return CompressBest(infile, output, out actualFormat, Formats.HUFF4, Formats.HUFF8, Formats.LZ10, Formats.RLE);
+ }
+
+ /// <summary>
+ /// Compresses the given file using whichever NDS-native format (huff4, huff8, lz10, lz11, rle) yields the smallest output.
+ /// </summary>
+ private static int CompressNDS(string infile, MemoryStream output, out Formats actualFormat)
+ {
+ return CompressBest(infile, output, out actualFormat, Formats.HUFF4, Formats.HUFF8, Formats.LZ10, Formats.LZ11, Formats.RLE);
+ }
+
+ /// <summary>
+ /// Compresses the given file with each of the given formats and keeps the smallest result.
+ /// </summary>
+ /// <param name="infile">The path of the file to compress.</param>
+ /// <param name="output">The stream the best compressed output is written to.</param>
+ /// <param name="actualFormat">The format that produced the smallest output.</param>
+ /// <param name="formats">The concrete formats to try.</param>
+ /// <returns>The smallest compressed size, or -1 if no format could compress the file.</returns>
+ private static int CompressBest(string infile, MemoryStream output, out Formats actualFormat, params Formats[] formats)
+ {
+ // only read the input data once from the file.
+ byte[] inputData;
+ using (FileStream inStream = File.OpenRead(infile))
+ {
+ inputData = new byte[inStream.Length];
+ inStream.Read(inputData, 0, inputData.Length);
+ }
+
+ MemoryStream bestOutput = null;
+ int minCompSize = int.MaxValue;
+ actualFormat = Formats.GBA;
+ foreach (Formats format in formats)
+ {
+ #region compress the file in each format, and save the best one
+
+ MemoryStream currentOutput = new MemoryStream();
+ CompressionFormat realFormat = null;
+ switch (format)
+ {
+ case Formats.HUFF4: realFormat = new Huffman4(); break;
+ case Formats.HUFF8: realFormat = new Huffman8(); break;
+ case Formats.LZ10: realFormat = new LZ10(); break;
+ case Formats.LZ11: realFormat = new LZ11(); break;
+ case Formats.LZOVL: realFormat = new LZOvl(); break;
+ case Formats.RLE: realFormat = new RLE(); break;
+ default:
+ Console.WriteLine("Unsupported single format: "+format);
+ continue;
+ }
+
+ int currentOutSize;
+ try
+ {
+ using (MemoryStream inStream = new MemoryStream(inputData))
+ {
+ currentOutSize = realFormat.Compress(inStream, inStream.Length, currentOutput);
+ }
+ }
+ catch (InputTooLargeException i)
+ {
+ // too-large input is fatal for every format alike; stop trying.
+ Console.WriteLine(i.Message);
+ actualFormat = format;
+ return -1;
+ }
+ catch (Exception)
+ {
+ // this format failed; silently try the next one.
+ continue;
+ }
+ if (currentOutSize < minCompSize)
+ {
+ bestOutput = currentOutput;
+ minCompSize = currentOutSize;
+ actualFormat = format;
+ }
+
+ #endregion
+ }
+
+ if (bestOutput == null)
+ {
+ Console.WriteLine("The file could not be compressed in any format.");
+ return -1;
+ }
+ bestOutput.WriteTo(output);
+ return minCompSize;
+ }
+
+ #endregion
+
+ #region decompression methods
+
+ /// <summary>
+ /// Decompresses the given input file or folder to the given output, trying all known
+ /// formats until one matches.
+ /// </summary>
+ /// <param name="input">The file or folder to decompress.</param>
+ /// <param name="output">The output file or folder; may be null or empty for the default.</param>
+ /// <param name="guessExtension">If true, the output extension is guessed from the first four bytes of the decompressed data.</param>
+ private static void Decompress(string input, string output, bool guessExtension)
+ {
+ if (!File.Exists(input) && !Directory.Exists(input))
+ {
+ Console.WriteLine("Cannot decompress a file or directory that does not exist (" + input + ")");
+ return;
+ }
+
+ // set the default value of the output
+ if (string.IsNullOrEmpty(output))
+ {
+ if (Directory.Exists(input))
+ {
+ string newDir = Path.GetFullPath(input) + "_dec";
+ if (!Directory.Exists(newDir))
+ Directory.CreateDirectory(newDir);
+ foreach (string file in Directory.GetFiles(input))
+ {
+ Decompress(file, newDir, guessExtension);
+ }
+ return;
+ }
+ else
+ {
+ if (!guessExtension)
+ output = input; // '.dat' gets added automatically if -ge is not given
+ else
+ output = Path.GetDirectoryName(input);
+ }
+ }
+
+ if (Directory.Exists(input))
+ {
+ if (File.Exists(output))
+ {
+ Console.WriteLine("Cannot decompress a folder to a single file.");
+ return;
+ }
+ if (!Directory.Exists(output))
+ Directory.CreateDirectory(output);
+ foreach (string file in Directory.GetFiles(input))
+ {
+ Decompress(file, output, guessExtension);
+ }
+ return;
+ }
+
+ byte[] inData;
+ using (FileStream inStream = File.OpenRead(input))
+ {
+ inData = new byte[inStream.Length];
+ inStream.Read(inData, 0, inData.Length);
+ }
+
+ MemoryStream decompressedData = new MemoryStream();
+ long decSize = -1;
+ Formats usedFormat = Formats.NDS;
+ // just try all formats, and stop once one has been found that can decompress it.
+ foreach (Formats f in Enum.GetValues(typeof(Formats)))
+ {
+ // map the enum value onto an actual format instance. (The original code passed
+ // null here, which made every attempt throw a NullReferenceException and never
+ // used the loop variable at all.)
+ CompressionFormat format;
+ switch (f)
+ {
+ case Formats.LZOVL: format = new LZOvl(); break;
+ case Formats.LZ10: format = new LZ10(); break;
+ case Formats.LZ11: format = new LZ11(); break;
+ case Formats.HUFF4: format = new Huffman4(); break;
+ case Formats.HUFF8: format = new Huffman8(); break;
+ case Formats.RLE: format = new RLE(); break;
+ default: continue; // HUFF, NDS and GBA are meta-formats, already covered above.
+ }
+ using (MemoryStream inStream = new MemoryStream(inData))
+ {
+ // discard any partial output written by a previously failed attempt.
+ decompressedData.SetLength(0);
+ decSize = Decompress(inStream, decompressedData, format);
+ if (decSize >= 0)
+ {
+ usedFormat = f;
+ break;
+ }
+ }
+ }
+ if (decSize < 0)
+ {
+ Console.WriteLine("Could not decompress " + input + "; no matching compression method found.");
+ return;
+ }
+
+ bool mustAppendExt = !Directory.Exists(output) && !File.Exists(output);
+
+ if (Directory.Exists(output))
+ {
+ output = CombinePaths(output, Path.GetFileName(input));
+ }
+
+ byte[] outData = decompressedData.ToArray();
+ if (mustAppendExt)
+ {
+ // strip a compression-specific extension before adding the new one.
+ switch (Path.GetExtension(output))
+ {
+ case ".cdat":
+ case ".lz10":
+ case ".lz11":
+ case ".lzovl":
+ case ".rle":
+ case ".huff4":
+ case ".huff8":
+ output = RemoveExtension(output);
+ break;
+ }
+ }
+ if (guessExtension)
+ {
+ // guess the extension from the first four bytes of the plaintext; bound the
+ // loop by the data length so tiny outputs cannot cause an IndexOutOfRangeException.
+ string ext = "";
+ for (int i = 0; i < Math.Min(4, outData.Length); i++)
+ {
+ if ((outData[i] >= 'a' && outData[i] <= 'z')
+ || (outData[i] >= 'A' && outData[i] <= 'Z')
+ || char.IsDigit((char)outData[i]))
+ ext += (char)outData[i];
+ else
+ break;
+ }
+ if (ext.Length > 0)
+ output += "." + ext;
+ else
+ output += ".dat";
+ }
+ else if (mustAppendExt)
+ output += ".dat";
+
+ using (FileStream outStream = File.Create(output))
+ {
+ outStream.Write(outData, 0, outData.Length);
+ Console.WriteLine(usedFormat.ToString() + "-decompressed " + input + " to " + output);
+ }
+ }
+
+ /// <summary>
+ /// Attempts to decompress the given input using the given format.
+ /// </summary>
+ /// <param name="inputStream">The stream with the compressed input data.</param>
+ /// <param name="output">The stream to write the decompressed data to.</param>
+ /// <param name="format">The compression format to attempt.</param>
+ /// <returns>The decompressed size, or a negative value if the format does not match or fails.</returns>
+ private static long Decompress(MemoryStream inputStream, MemoryStream output, CompressionFormat format)
+ {
+ // a missing format can never match; report "no match" instead of throwing a NullReferenceException.
+ if (format == null)
+ return -1;
+ if (!format.Supports(inputStream, inputStream.Length))
+ return -1;
+ try
+ {
+ return format.Decompress(inputStream, inputStream.Length, output);
+ }
+ catch (TooMuchInputException e)
+ {
+ // decompression succeeded, but not all input was used; keep the result anyway.
+ Console.WriteLine(e.Message);
+ return output.Length;
+ }
+ catch (Exception e)
+ {
+ Console.WriteLine("Could not decompress using the " + format.ShortFormatString + " format; " + e.Message);
+ return -1;
+ }
+ }
+
+ #endregion
+
+ /// <summary>
+ /// Joins a directory and a file name. An absolute file path overrides the directory;
+ /// a separator is inserted only when the directory does not already end in one.
+ /// </summary>
+ private static string CombinePaths(string dir, string file)
+ {
+ // a rooted file path stands on its own.
+ if (Path.IsPathRooted(file))
+ return file;
+ bool endsWithSeparator = dir.EndsWith(Path.DirectorySeparatorChar + "")
+ || dir.EndsWith(Path.AltDirectorySeparatorChar + "");
+ return endsWithSeparator
+ ? dir + file
+ : dir + Path.DirectorySeparatorChar + file;
+ }
+ /// <summary>
+ /// Returns the given path with its final extension removed, re-assembled from its
+ /// directory and the extension-less file name.
+ /// </summary>
+ private static string RemoveExtension(string path)
+ {
+ string directory = Path.GetDirectoryName(path);
+ string bareName = Path.GetFileNameWithoutExtension(path);
+ return directory + Path.DirectorySeparatorChar + bareName;
+ }
+ }
+}
diff --git a/CSharp/DSDecmp/NewestProgram.cs b/CSharp/DSDecmp/NewestProgram.cs
new file mode 100644
index 0000000..6ab58b7
--- /dev/null
+++ b/CSharp/DSDecmp/NewestProgram.cs
@@ -0,0 +1,594 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.Reflection;
+using System.IO;
+using DSDecmp.Formats;
+using DSDecmp.Formats.Nitro;
+
+namespace DSDecmp
+{
+ public class NewestProgram
+ {
+#if DEBUG
+ public static string PluginFolder = "./Plugins/Debug";
+#else
+ public static string PluginFolder = "./Plugins";
+#endif
+
+ public static void Main(string[] args)
+ {
+ // I/O input:
+ // file -> read from file, save to file
+ // folder [-co] -> read all files from folder, save to folder_dec or folder_cmp (when decomp or comp resp.) (Same filenames)
+ // file newfile -> read from file, save to newfile
+ // folderin folderout [-co] -> read all files from folderin, save to folderout. (Same filenames)
+ // file1 file2 ... -> read file1, file2, etc
+ // file1 file2 ... folderout [-co] -> read file1, file2, etc, save to folderout. (same filenames)
+ // -> when -co is present, all files that could not be (de)compressed will be copied instead.
+
+ // preambles:
+ // -> decompress input to output using first matched format
+ // -d [-ge] -> decompress input to output using first matched format. If -ge, then guess the extension based on first 4 bytes.
+ // -d [-ge] -f -> decompress input to output using the indicated format. If -ge, then guess the extension based on first 4 bytes.
+ // -c [opt1 opt2 ...] -> compress input to output using the specified format and its options.
+
+ // built-in formats:
+ // lz10 -> LZ-0x10, found in >= GBA
+ // lz11 -> LZ-0x11, found in >= NDS
+ // lzovl -> LZ-Ovl/Overlay / backwards LZ, found mostly in NDS overlay files.
+ // huff4 -> 4-bit Huffman, found in >= GBA
+ // huff8 -> 8-bit Huffman, found in >= GBA
+ // huff -> any Huffman format.
+ // gba* -> any format natively supported by the GBA
+ // nds* -> any format natively supported by the NDS, but not LZ-Ovl
+ // when compressing, the best format of the selected set is used. when decompression,
+ // only the formats in the selected set are used.
+
+ if (args.Length == 0)
+ {
+ PrintUsage();
+ //Console.ReadLine();
+ return;
+ }
+
+ if (args[0] == "-c")
+ {
+ if (args.Length <= 2) { Console.WriteLine("Too few arguments."); return; }
+ CompressionFormat format = FirstOrDefault(GetFormat(args[1]));
+ if (format == null) { return; }
+
+ string[] ioArgs = new string[args.Length - 2];
+ Array.Copy(args, 2, ioArgs, 0, ioArgs.Length);
+
+ int optionCount = format.ParseCompressionOptions(ioArgs);
+ string[] realIoArgs = new string[ioArgs.Length - optionCount];
+ Array.Copy(ioArgs, optionCount, realIoArgs, 0, realIoArgs.Length);
+
+ Compress(realIoArgs, format);
+ }
+ else if (args[0] == "-d")
+ {
+ if (args.Length <= 1) { PrintUsage(); return; }
+ int ioIdx = 1;
+ bool guessExtension = false;
+ if (args[ioIdx] == "-ge")
+ {
+ guessExtension = true;
+ ioIdx++;
+ }
+ IEnumerable formats = GetAllFormats(false); // we do not need the built-in composite formats to decompress.
+ if (args[ioIdx] == "-f")
+ {
+ if (args.Length <= ioIdx + 2) { Console.WriteLine("Too few arguments."); return; }
+ formats = GetFormat(args[ioIdx + 1]);
+ ioIdx += 2;
+ }
+ if (formats == null) { return; }
+ if (args.Length <= ioIdx) { Console.WriteLine("Too few arguments."); return; }
+
+ string[] ioArgs = new string[args.Length - ioIdx];
+ Array.Copy(args, ioIdx, ioArgs, 0, ioArgs.Length);
+
+ Decompress(ioArgs, formats, guessExtension);
+ }
+ else
+ {
+ Decompress(args, GetAllFormats(false), false);
+ }
+ }
+
+ #region Usage printer
+ private static void PrintUsage()
+ {
+ Console.WriteLine("DSDecmp - Decompressor for compression formats used on the NDS - by Barubary");
+ Console.WriteLine();
+ Console.WriteLine("Usage:\tDSDecmp FMTARGS IOARGS");
+ Console.WriteLine();
+ Console.WriteLine("IOARGS can be:");
+ Console.WriteLine("-------------------------------------------------------------------------------");
+ Console.WriteLine("file -> read the file, overwrite it.");
+ Console.WriteLine("folder [-co] -> read all files from folder, save to folder_dec");
+ Console.WriteLine(" or folder_cmp.");
+ Console.WriteLine("file newfile -> read the file, save it to newfile.");
+ Console.WriteLine(" (newfile cannot exist yet)");
+ Console.WriteLine("folderin folderout [-co] -> read all files from folderin, save to folderout.");
+ Console.WriteLine("file1 file2 ... -> read file1, file2, etc; overwrite them.");
+ Console.WriteLine("file1 file2 ... folderout [-co] -> read file1, file2, etc; save to folderout.");
+ Console.WriteLine();
+ Console.WriteLine("When -co is present, all files that could not be handled will be copied to the");
+ Console.WriteLine(" indicated output folder.");
+ Console.WriteLine("-------------------------------------------------------------------------------");
+ Console.WriteLine();
+ Console.WriteLine("FMTARGS can be:");
+ Console.WriteLine("-------------------------------------------------------------------------------");
+ Console.WriteLine(" -> try to decompress input to output.");
+ Console.WriteLine("-d [-ge] -> try to decompress input to output.");
+ Console.WriteLine("-d [-ge] -f -> try to decompress input to output, using given format");
+ Console.WriteLine("-c [opt1 ...] -> compress input to output using given format ");
+ Console.WriteLine(" and options.");
+ Console.WriteLine();
+ Console.WriteLine("When -ge is present, the extension of the output file will be determined by");
+ Console.WriteLine(" the first 4 bytes of the decompressed data. (if those are alphanumeric ASCII");
+ Console.WriteLine(" characters).");
+ Console.WriteLine("-------------------------------------------------------------------------------");
+ Console.WriteLine("Supported formats:");
+ Console.WriteLine(" -> description");
+ foreach (CompressionFormat fmt in GetAllFormats(true))
+ {
+ Console.WriteLine(fmt.CompressionFlag.PadRight(7, ' ') + "-> " + fmt.Description);
+ }
+ Console.WriteLine("-------------------------------------------------------------------------------");
+ }
+ #endregion
+
+ #region Method: Decompress(string[] ioArgs, IEnumerable formats)
+ private static void Decompress(string[] ioArgs, IEnumerable formats, bool guessExtension)
+ {
+ string[] inputFiles;
+ string outputDir;
+ bool copyErrors;
+ if (!ParseIOArguments(ioArgs, false, out inputFiles, out outputDir, out copyErrors))
+ return;
+
+ foreach (string input in inputFiles)
+ {
+ string outputFile = outputDir ?? IOUtils.GetParent(input);
+ if (Directory.Exists(outputDir))
+ outputFile = Path.Combine(outputFile + Path.DirectorySeparatorChar, Path.GetFileName(input));
+
+ try
+ {
+ // read the file only once.
+ byte[] inputData;
+ using (Stream inStream = File.OpenRead(input))
+ {
+ inputData = new byte[inStream.Length];
+ inStream.Read(inputData, 0, inputData.Length);
+ }
+ bool decompressed = false;
+ foreach (CompressionFormat format in formats)
+ {
+ if (!format.SupportsDecompression)
+ continue;
+
+ #region try to decompress using the current format
+
+ using (MemoryStream inStr = new MemoryStream(inputData),
+ outStr = new MemoryStream())
+ {
+ if (!format.Supports(inStr, inputData.Length))
+ continue;
+ try
+ {
+ long decompSize = format.Decompress(inStr, inputData.Length, outStr);
+ if (decompSize < 0)
+ continue;
+ if (guessExtension)
+ {
+ string outFileName = Path.GetFileNameWithoutExtension(outputFile);
+ outStr.Position = 0;
+ byte[] magic = new byte[4];
+ outStr.Read(magic, 0, 4);
+ outStr.Position = 0;
+ outFileName += "." + GuessExtension(magic, Path.GetExtension(outputFile).Substring(1));
+ outputFile = outputFile.Replace(Path.GetFileName(outputFile), outFileName);
+ }
+ using (FileStream output = File.Create(outputFile))
+ {
+ outStr.WriteTo(output);
+ }
+ decompressed = true;
+ Console.WriteLine(format.ShortFormatString + "-decompressed " + input + " to " + outputFile);
+ break;
+ }
+ catch (TooMuchInputException tmie)
+ {
+ // a TMIE is fine. let the user know and continue saving the decompressed data.
+ Console.WriteLine(tmie.Message);
+ if (guessExtension)
+ {
+ string outFileName = Path.GetFileNameWithoutExtension(outputFile);
+ outStr.Position = 0;
+ byte[] magic = new byte[4];
+ outStr.Read(magic, 0, 4);
+ outStr.Position = 0;
+ outFileName += "." + GuessExtension(magic, Path.GetExtension(outputFile).Substring(1));
+ outputFile = outputFile.Replace(Path.GetFileName(outputFile), outFileName);
+ }
+ using (FileStream output = File.Create(outputFile))
+ {
+ outStr.WriteTo(output);
+ }
+ decompressed = true;
+ Console.WriteLine(format.ShortFormatString + "-decompressed " + input + " to " + outputFile);
+ break;
+ }
+ catch (Exception)
+ {
+ continue;
+ }
+ }
+
+ #endregion
+ }
+
+ if (!decompressed)
+ {
+ #region copy or print and continue
+
+ if (copyErrors)
+ {
+ Copy(input, outputFile);
+ }
+ else
+ Console.WriteLine("No suitable decompressor found for " + input + ".");
+
+ #endregion
+ }
+ }
+ catch (FileNotFoundException)
+ {
+ Console.WriteLine("The file " + input + " does not exist.");
+ continue;
+ }
+ catch (Exception ex)
+ {
+ Console.WriteLine("Could not load file " + input + ";");
+ Console.WriteLine(ex.Message);
+#if DEBUG
+ Console.WriteLine(ex.StackTrace);
+#endif
+ }
+ } // end foreach input
+ }
+ #endregion Method: Decompress
+
+ #region Method: Compress
+ ///
+ /// (Attempts to) Compress the given input to the given output, using the given format.
+ ///
+ /// The I/O arguments from the program input.
+ /// The desired format to compress with.
+ private static void Compress(string[] ioArgs, CompressionFormat format)
+ {
+ if (!format.SupportsCompression)
+ {
+ Console.WriteLine("Cannot compress using " + format.ShortFormatString + "; compression is not supported.");
+ return;
+ }
+
+ string[] inputFiles;
+ string outputDir;
+ bool copyErrors;
+ if (!ParseIOArguments(ioArgs, true, out inputFiles, out outputDir, out copyErrors))
+ return;
+
+ foreach (string input in inputFiles)
+ {
+ string outputFile = outputDir ?? IOUtils.GetParent(input);
+ if (Directory.Exists(outputDir))
+ outputFile = Path.Combine(outputFile + Path.DirectorySeparatorChar, Path.GetFileName(input));
+
+ try
+ {
+ // read the file only once.
+ byte[] inputData;
+ using (Stream inStream = File.OpenRead(input))
+ {
+ inputData = new byte[inStream.Length];
+ inStream.Read(inputData, 0, inputData.Length);
+ }
+
+ #region try to compress
+
+ using (MemoryStream inStr = new MemoryStream(inputData),
+ outStr = new MemoryStream())
+ {
+ try
+ {
+ long compSize = format.Compress(inStr, inputData.Length, outStr);
+ if (compSize > 0)
+ {
+ using (FileStream output = File.Create(outputFile))
+ {
+ outStr.WriteTo(output);
+ }
+ if (format is CompositeFormat)
+ Console.Write((format as CompositeFormat).LastUsedCompressFormatString);
+ else
+ Console.Write(format.ShortFormatString);
+ Console.WriteLine("-compressed " + input + " to " + outputFile);
+ }
+ }
+ catch (Exception ex)
+ {
+ #region copy or print and continue
+
+ if (copyErrors)
+ {
+ Copy(input, outputFile);
+ }
+ else
+ {
+ Console.WriteLine("Could not " + format.ShortFormatString + "-compress " + input + ";");
+ Console.WriteLine(ex.Message);
+#if DEBUG
+ Console.WriteLine(ex.StackTrace);
+#endif
+ }
+
+ #endregion
+ }
+ }
+
+ #endregion
+ }
+ catch (FileNotFoundException)
+ {
+ Console.WriteLine("The file " + input + " does not exist.");
+ continue;
+ }
+ catch (Exception ex)
+ {
+ Console.WriteLine("Could not load file " + input + ";");
+ Console.WriteLine(ex.Message);
+#if DEBUG
+ Console.WriteLine(ex.StackTrace);
+#endif
+ }
+ } // end foreach input
+ }
+ #endregion Method: Compress
+
+ #region Method: ParseIOArguments
+ ///
+ /// Parses the IO arguments of the input.
+ ///
+ /// The arguments to parse.
+ /// If the arguments are used for compression. If not, decompression is assumed. (used for default output folder name)
+ /// The files to handle as input.
+ /// The directory to save the handled files in. If this is null,
+ /// the files should be overwritten. If this does not exist, it is the output file
+ /// (the input may only contain one file if that is the case).
+ /// If files that cannot be handled (properly) should be copied to the output directory.
+ /// True iff parsing of the arguments succeeded.
+ private static bool ParseIOArguments(string[] ioArgs, bool compress, out string[] inputFiles, out string outputDir, out bool copyErrors)
+ {
+ inputFiles = null;
+ // when null, output dir = input dir. if it does not exist, it is the output file (only possible when only one input file).
+ outputDir = null;
+ copyErrors = false;
+
+ #region check if the -co flag is present
+ if (ioArgs.Length > 0 && ioArgs[ioArgs.Length - 1] == "-co")
+ {
+ string[] newIoArgs = new string[ioArgs.Length - 1];
+ Array.Copy(ioArgs, newIoArgs, newIoArgs.Length);
+ ioArgs = newIoArgs;
+ copyErrors = true;
+ }
+ #endregion
+
+ switch (ioArgs.Length)
+ {
+ case 0:
+ Console.WriteLine("No input file given.");
+ return false;
+ case 1:
+ if (Directory.Exists(ioArgs[0]))
+ {
+ inputFiles = Directory.GetFiles(ioArgs[0]);
+ if (compress)
+ outputDir = Path.GetFullPath(ioArgs[0]) + "_cmp";
+ else
+ outputDir = Path.GetFullPath(ioArgs[0]) + "_dec";
+ if (!Directory.Exists(outputDir))
+ Directory.CreateDirectory(outputDir);
+ break;
+ }
+ else if (File.Exists(ioArgs[0]))
+ {
+ inputFiles = ioArgs;
+ outputDir = null;
+ break;
+ }
+ else
+ {
+ Console.WriteLine("The file " + ioArgs[0] + " does not exist.");
+ return false;
+ }
+ case 2:
+ if (Directory.Exists(ioArgs[0]))
+ {
+ inputFiles = Directory.GetFiles(ioArgs[0]);
+ outputDir = ioArgs[1];
+ if (!Directory.Exists(outputDir))
+ Directory.CreateDirectory(outputDir);
+ break;
+ }
+ else if (File.Exists(ioArgs[0]))
+ {
+ if (File.Exists(ioArgs[1]))
+ {
+ inputFiles = ioArgs;
+ outputDir = null;
+ break;
+ }
+ else// if (Directory.Exists(ioArgs[1]))
+ // both nonexisting file and existing directory is handled the same.
+ {
+ inputFiles = new string[] { ioArgs[0] };
+ outputDir = ioArgs[1];
+ break;
+ }
+ }
+ else
+ {
+ Console.WriteLine("The file " + ioArgs[0] + " does not exist.");
+ return false;
+ }
+ default:
+ if (File.Exists(ioArgs[ioArgs.Length - 1]))
+ {
+ inputFiles = ioArgs;
+ outputDir = null;
+ break;
+ }
+ else //if (Directory.Exists(ioArgs[ioArgs.Length - 1]))
+ // both existing and nonexisting directories are fine.
+ {
+ outputDir = ioArgs[ioArgs.Length - 1];
+ inputFiles = new string[ioArgs.Length - 1];
+ Array.Copy(ioArgs, inputFiles, inputFiles.Length);
+
+ // but we must make sure the output directory exists.
+ if (!Directory.Exists(outputDir))
+ Directory.CreateDirectory(outputDir);
+ break;
+ }
+ }
+
+ return true;
+ }
+ #endregion ParseIOArguments
+
+ #region Method: GuessExtension(magic, defaultExt)
+ ///
+ /// Guess the extension of a file by looking at the given magic bytes of a file.
+ /// If they are alphanumeric (without accents), they could indicate the type of file.
+ /// If no sensible extension could be found from the magic bytes, the given default extension is returned.
+ ///
+ private static string GuessExtension(byte[] magic, string defaultExt)
+ {
+ string ext = "";
+ for (int i = 0; i < magic.Length && i < 4; i++)
+ {
+ if ((magic[i] >= 'a' && magic[i] <= 'z') || (magic[i] >= 'A' && magic[i] <= 'Z')
+ || char.IsDigit((char)magic[i]))
+ {
+ ext += (char)magic[i];
+ }
+ else
+ break;
+ }
+ if (ext.Length <= 1)
+ return defaultExt;
+ return ext;
+ }
+ #endregion
+
+ ///
+ /// Copies the source file to the destination path.
+ ///
+ private static void Copy(string sourcefile, string destfile)
+ {
+ if (Path.GetFullPath(sourcefile) == Path.GetFullPath(destfile))
+ return;
+ File.Copy(sourcefile, destfile);
+ Console.WriteLine("Copied " + sourcefile + " to " + destfile);
+ }
+
+ #region Format sequence getters
+ ///
+ /// Gets the compression format corresponding to the given format string.
+ ///
+ private static IEnumerable GetFormat(string formatstring)
+ {
+ if (formatstring == null)
+ yield break;
+ foreach (CompressionFormat fmt in GetAllFormats(true))
+ if (fmt.CompressionFlag == formatstring)
+ {
+ yield return fmt;
+ yield break;
+ }
+ Console.WriteLine("No such compression format: " + formatstring);
+ }
+
+ ///
+ /// Gets a sequence over all compression formats currently supported; both built-in and plugin-based.
+ ///
+ private static IEnumerable GetAllFormats(bool alsoBuiltInCompositeFormats)
+ {
+ foreach (CompressionFormat fmt in GetBuiltInFormats(alsoBuiltInCompositeFormats))
+ yield return fmt;
+ foreach (CompressionFormat fmt in GetPluginFormats())
+ yield return fmt;
+ }
+
+ ///
+ /// Gets a sequence over all built-in compression formats.
+ ///
+ /// If the built-in composite formats should also be part of the sequence.
+ private static IEnumerable GetBuiltInFormats(bool alsoCompositeFormats)
+ {
+ yield return new LZOvl();
+ yield return new LZ10();
+ yield return new LZ11();
+ yield return new Huffman4();
+ yield return new Huffman8();
+ yield return new RLE();
+ yield return new NullCompression();
+ if (alsoCompositeFormats)
+ {
+ yield return new HuffmanAny();
+ yield return new CompositeGBAFormat();
+ yield return new CompositeNDSFormat();
+ }
+ }
+ ///
+ /// Gets a sequence over all formats that can be used from plugins.
+ ///
+ private static IEnumerable GetPluginFormats()
+ {
+ string pluginPath = Directory.GetParent(Assembly.GetExecutingAssembly().Location).FullName;
+ pluginPath = Path.Combine(pluginPath, PluginFolder);
+ if (System.IO.Directory.Exists(pluginPath))
+ {
+ foreach (CompressionFormat fmt in IOUtils.LoadCompressionPlugins(pluginPath))
+ yield return fmt;
+ }
+ else
+ {
+ Console.WriteLine("Plugin folder " + pluginPath + " is not present; only built-in formats are supported.");
+ }
+ }
+ #endregion
+
+ ///
+ /// Gets the first item from the given sequence, or the default value of the type in the sequence
+ /// if it is empty.
+ ///
+ private static T FirstOrDefault(IEnumerable sequence)
+ {
+ if (sequence != null)
+ {
+ IEnumerator enumerator = sequence.GetEnumerator();
+ if (enumerator.MoveNext())
+ return enumerator.Current;
+ }
+ return default(T);
+ }
+ }
+}
diff --git a/CSharp/DSDecmp/Program.cs b/CSharp/DSDecmp/Program.cs
new file mode 100644
index 0000000..0772f4e
--- /dev/null
+++ b/CSharp/DSDecmp/Program.cs
@@ -0,0 +1,1464 @@
+//Copyright (c) 2010 Nick Kraayenbrink
+//
+//Permission is hereby granted, free of charge, to any person obtaining a copy
+//of this software and associated documentation files (the "Software"), to deal
+//in the Software without restriction, including without limitation the rights
+//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+//copies of the Software, and to permit persons to whom the Software is
+//furnished to do so, subject to the following conditions:
+//
+//The above copyright notice and this permission notice shall be included in
+//all copies or substantial portions of the Software.
+//
+//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+//THE SOFTWARE.
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+using System.Text.RegularExpressions;
+
+namespace DSDecmp
+{
+ unsafe class Program
+ {
+ static uint MAX_OUTSIZE = 0xA00000;
+ static bool showAlways = true;
+
+ const int LZ10_TAG = 0x10,
+ LZ11_TAG = 0x11,
+ HUFF_TAG = 0x20, // actually 0x28 and 0x24
+ RLE_TAG = 0x30,
+ LZ40_TAG = 0x40,
+ NONE_TAG = 0x00;
+
+ static bool CopyErrors = false;
+ static bool AllowHuff = true;
+ static bool AllowRLE = true;
+ static bool AllowNone = true;
+ static bool AllowLZ10 = true;
+ static bool AllowLZ11 = true;
+ static bool AllowLZ40 = true;
+ static bool AllowOVL = true;
+ static bool ForceOVL = false;
+
+ /*
+ static void Main(string[] args)
+ {
+ Console.WriteLine(-14 % 10);
+ Console.ReadLine();
+ }/**/
+
+ public static void Main1(string[] args)
+ {
+
+ if (args.Length == 0) { Usage(); return; }
+ if (args[0] == "-ce")
+ {
+ CopyErrors = true;
+ string[] newArgs = new string[args.Length - 1];
+ Array.Copy(args, 1, newArgs, 0, newArgs.Length);
+ args = newArgs;
+ }
+ if (args.Length == 0) { Usage(); return; }
+ if (args[0].StartsWith("-n"))
+ {
+ string rest = args[0].Substring(2);
+ AllowHuff = !rest.Contains("h");
+ AllowLZ10 = !rest.Contains("0");
+ AllowLZ11 = !rest.Contains("1");
+ AllowLZ40 = !rest.Contains("4");
+ AllowNone = !rest.Contains("n");
+ AllowRLE = !rest.Contains("r");
+ AllowOVL = !rest.Contains("o");
+ string[] newArgs = new string[args.Length - 1];
+ Array.Copy(args, 1, newArgs, 0, newArgs.Length);
+ args = newArgs;
+ }
+ else if (args[0] == "-ovl")
+ {
+ ForceOVL = true;
+ string[] newArgs = new string[args.Length - 1];
+ Array.Copy(args, 1, newArgs, 0, newArgs.Length);
+ args = newArgs;
+ }
+
+ if (args.Length == 1)
+ {
+ if (Directory.Exists(args[0])) // only directory given? output to same directory
+ args = new string[] { args[0], args[0] };
+ else if (File.Exists(args[0])) // only file given? output to same dir as file
+ args = new string[] { args[0], Directory.GetParent(args[0]).FullName };
+ }
+
+ if (args.Length != 2 && args.Length != 3)
+ {
+ Usage();
+ return;
+ }
+ if (args.Length == 3)
+ MAX_OUTSIZE = uint.Parse(args[2], System.Globalization.NumberStyles.HexNumber);/**/
+
+
+ args[0] = makeSlashes(args[0]);
+ args[1] = makeSlashes(args[1]);
+ /**/
+
+ if (!Directory.Exists(args[1]))
+ Directory.CreateDirectory(args[1]);
+ if (File.Exists(args[0]))/**/
+ Decompress(args[0], args[1]);
+ else
+ DecompressFolder(args[0], args[1]);/**/
+ }
+
+ private static void Usage()
+ {
+ Console.WriteLine("usage: DSDecmp (-ce) (-n[h014nro] | -ovl) infile [outfolder [maxlen]]");
+ Console.WriteLine("or: DSDecmp (-ce) (-n[h014nro] | -ovl) infolder [outfolder [maxlen]]");
+ Console.WriteLine("maxlen is optional and hexadecimal, and all files that would be larger than maxlen when decompressed are ignored");
+ Console.WriteLine("Adding the -ce flag will copy every file that generates an error while processing to the output dir, and does not wait for user confirmation.");
+ Console.WriteLine("Adding the -n flag with any number of the characters h,0,1,4,n,r, or o will disable compression formats of the corresponding letter;");
+ Console.WriteLine("h - Huffman");
+ Console.WriteLine("0 - LZ 0x10");
+ Console.WriteLine("1 - LZ 0x11");
+ Console.WriteLine("4 - LZ 0x40");
+ Console.WriteLine("n - None-compression (ie: 0x00 first byte, next 3 bytes file size - 4)");
+ Console.WriteLine("r - Run-Length Encoding");
+ Console.WriteLine("o - LZ Overlay compression");
+ Console.WriteLine();
+ Console.WriteLine("Providing the -ovl flag (the -n and -ovl flags cannot appear together) will "
+ + "try to decompress the given file(s) with the DS's overlay compression. Normally, "
+ + "this format is only recognized if the file name is 'arm9.bin' or "
+ + "'overlay_X.bin' (with X any number).");
+ }
+
+ private static void WriteDebug(string s)
+ {
+ System.Diagnostics.Debug.WriteLine(s);
+ if (showAlways)
+ Console.WriteLine(s);
+ }
+
+ #region method: DecompressFolder
+ private static void DecompressFolder(string inflr, string outflr)
+ {
+ showAlways = false; // only print errors/failures
+
+ if (!outflr.EndsWith("/") && !outflr.EndsWith("\\"))
+ outflr += "/";
+ StreamWriter sw = null;
+ if (!Directory.Exists(inflr))
+ {
+ Console.WriteLine("No such file or folder: " + inflr);
+ return;
+ }
+ string[] files = Directory.GetFiles(inflr);
+ foreach (string fname in files)
+ try
+ {
+ Decompress(makeSlashes(fname), outflr);
+ }
+ catch (Exception e)
+ {
+ if (sw == null)
+ sw = new StreamWriter(new FileStream(outflr + "lzsslog.txt", FileMode.Create));
+ Console.WriteLine(e.Message);
+ sw.WriteLine(e.Message);
+ string copied = fname.Replace(inflr, outflr);
+ if (!File.Exists(copied))
+ File.Copy(fname, copied);
+ }
+ Console.WriteLine("Done decompressing files in folder " + inflr);
+ if (sw != null)
+ {
+ Console.WriteLine("Errors have been logged to " + outflr + "lzsslog.txt");
+ sw.Flush();
+ sw.Close();
+ }
+ }
+ #endregion
+
+ #region Method: Decompress
+ static void Decompress(string filein, string outflr)
+ {
+ // check if we need to decompress the file using Overlay compression first
+ string filename = Path.GetFileName(filein);
+ if (AllowOVL)
+ {
+ if (filename == "arm9.bin"
+ || Regex.Match(filename, "overlay_[0-9]+\\.bin").Success
+ || ForceOVL)
+ {
+ try
+ {
+ DecompressLZOverlay(filein, outflr);
+ }
+ catch (Exception ex)
+ {
+ Console.WriteLine("Could not properly decompress {0:s};", filein);
+ Console.WriteLine(ex.Message);
+ Console.WriteLine(ex.StackTrace);
+ if (CopyErrors)
+ CopyFile(filein, outflr);
+ else
+ Console.ReadLine();
+ }
+ return;
+ }
+ }
+
+
+ FileStream fstr = File.OpenRead(filein);
+ if (fstr.Length > int.MaxValue)
+ throw new Exception("Files larger than 2GB cannot be decompressed by this program.");
+ BinaryReader br = new BinaryReader(fstr);
+
+ byte tag = br.ReadByte();
+ br.Close();
+ try
+ {
+ switch (tag >> 4)
+ {
+ case LZ10_TAG >> 4:
+ if (tag == LZ10_TAG && AllowLZ10)
+ DecompressLZ77(filein, outflr);
+ else if (tag == LZ11_TAG && AllowLZ11)
+ Decompress11LZS(filein, outflr);
+ else
+ CopyFile(filein, outflr);
+ break;
+ case LZ40_TAG >> 4:
+ if (AllowLZ40 && tag == LZ40_TAG) // LZ40 tag must match completely
+ DecompressLZ40(filein, outflr);
+ else
+ CopyFile(filein, outflr);
+ break;
+ case RLE_TAG >> 4:
+ if (AllowRLE && tag == RLE_TAG) // RLE tag must match completely
+ DecompressRLE(filein, outflr);
+ else
+ CopyFile(filein, outflr);
+ break;
+ case NONE_TAG >> 4:
+ if (AllowNone && tag == NONE_TAG)// NONE tag must match completely
+ DecompressNone(filein, outflr);
+ else
+ CopyFile(filein, outflr);
+ break;
+ case HUFF_TAG >> 4:
+ if (AllowHuff) // huff tag only needs to match the first 4 bits
+ // throws InvalidDataException if first 4 bits matched by accident
+ DecompressHuffman(filein, outflr);
+ else
+ CopyFile(filein, outflr);
+ break;
+ default: CopyFile(filein, outflr); break;
+ }
+ }
+ catch (InvalidDataException)
+ {
+ CopyFile(filein, outflr);
+ }
+ catch (Exception e)
+ {
+ Console.WriteLine("Could not properly decompress {0:s};", filein);
+ Console.WriteLine(e.Message);
+ Console.WriteLine(e.StackTrace);
+ if (CopyErrors)
+ CopyFile(filein, outflr);
+ else
+ Console.ReadLine();
+ }
+ }
+ #endregion
+
+ #region Method: CopyFile
+ ///
+ /// Copies a file
+ ///
+ /// The input file
+ /// The output folder. (the file keeps its name, other files get overwritten)
+ static void CopyFile(string filein, string outflr)
+ {
+ filein = makeSlashes(filein);
+ string outfname = filein.Substring(filein.LastIndexOf("/") + 1);
+ if (!outflr.EndsWith("/"))
+ outflr += "/";
+ outfname = outflr + outfname;
+ File.Copy(filein, outfname, true);
+ Console.WriteLine("Copied " + filein + " to " + outflr);
+ }
+ #endregion
+
+ #region RLE
+ static void DecompressRLE(string filein, string outflr)
+ {
+ /* SWI 14h (GBA/NDS7/NDS9) - RLUnCompWram
+ SWI 15h (GBA/NDS7/NDS9) - RLUnCompVram (NDS: with Callback)
+ Expands run-length compressed data. The Wram function is faster, and writes in units of 8bits. For the Vram function the destination must be halfword aligned, data is written in units of 16bits.
+ If the size of the compressed data is not a multiple of 4, please adjust it as much as possible by padding with 0. Align the source address to a 4Byte boundary.
+
+ r0 Source Address, pointing to data as such:
+ Data header (32bit)
+ Bit 0-3 Reserved
+ Bit 4-7 Compressed type (must be 3 for run-length)
+ Bit 8-31 Size of decompressed data
+ Repeat below. Each Flag Byte followed by one or more Data Bytes.
+ Flag data (8bit)
+ Bit 0-6 Expanded Data Length (uncompressed N-1, compressed N-3)
+ Bit 7 Flag (0=uncompressed, 1=compressed)
+ Data Byte(s) - N uncompressed bytes, or 1 byte repeated N times
+ r1 Destination Address
+ r2 Callback parameter (NDS SWI 15h only, see Callback notes below)
+ r3 Callback structure (NDS SWI 15h only, see Callback notes below)
+
+ Return: No return value, Data written to destination address.*/
+
+ FileStream fstr = new FileStream(filein, FileMode.Open);
+ if (fstr.Length > int.MaxValue)
+ throw new Exception("Files larger than 2GB cannot be decompressed as RLE-compressed files.");
+ BinaryReader br = new BinaryReader(fstr);
+
+ long decomp_size = 0, curr_size = 0;
+ int i, rl;
+ byte flag, b;
+ bool compressed;
+
+ if (br.ReadByte() != RLE_TAG)
+ throw new InvalidDataException(String.Format("File {0:s} is not a valid RLE file", filein));
+ for (i = 0; i < 3; i++)
+ decomp_size += br.ReadByte() << (i * 8);
+ if (decomp_size > MAX_OUTSIZE)
+ throw new Exception(String.Format("{0:s} will be larger than 0x{1:x} and will not be decompressed.", filein, MAX_OUTSIZE));
+
+ if (showAlways)
+ Console.WriteLine("Decompressing {0:s}. (outsize: 0x{1:x})", filein, decomp_size);
+
+ #region decompress
+ byte[] outdata = new byte[decomp_size];
+
+ while (true)
+ {
+ // get tag
+ try { flag = br.ReadByte(); }
+ catch (EndOfStreamException) { break; }
+ compressed = (flag & 0x80) > 0;
+ rl = flag & 0x7F;
+ if (compressed)
+ rl += 3;
+ else
+ rl += 1;
+ //curr_size += rl;
+ if (compressed)
+ {
+ try { b = br.ReadByte(); }
+ catch (EndOfStreamException) { break; }// throw new Exception(String.Format("Invalid RLE format in file {0:s}; incomplete data near EOF.", filein)); }
+ for (i = 0; i < rl; i++)
+ outdata[curr_size++] = b;
+ }
+ else
+ for (i = 0; i < rl; i++)
+ try { outdata[curr_size++] = br.ReadByte(); }
+ catch (EndOfStreamException) { break; }// throw new Exception(String.Format("Invalid RLE format in file {0:s}; incomplete data near EOF.", filein)); }
+
+ if (curr_size > decomp_size)
+ {
+ Console.WriteLine("curr_size > decomp_size; {0:x}>{1:x}", curr_size, decomp_size);
+ break;// throw new Exception(String.Format("File {0:s} is not a valid LZSS file; actual output size > output size in header", filein));
+ }
+ if (curr_size == decomp_size)
+ break;
+ }
+ #endregion
+
+
+ #region save
+ string ext = "";
+ for (i = 0; i < 4; i++)
+ if (char.IsLetterOrDigit((char)outdata[i]))
+ ext += (char)outdata[i];
+ else
+ break;
+ if (ext.Length == 0)
+ ext = "dat";
+ ext = "." + ext;
+ filein = filein.Replace("\\", "/");
+ outflr = outflr.Replace("\\", "/");
+ string outfname = filein.Substring(filein.LastIndexOf("/") + 1);
+ if (outfname.Contains("."))
+ outfname = outfname.Substring(0, outfname.LastIndexOf('.'));
+
+ if (!outflr.EndsWith("/"))
+ outflr += "/";
+ /*while (File.Exists(outflr + outfname + ext))
+ outfname += "_";/**/
+
+ BinaryWriter bw = new BinaryWriter(new FileStream(outflr + outfname + ext, FileMode.Create));
+ for (i = 0; i < outdata.Length; i++)
+ bw.Write(outdata[i]);
+ //bw.Write(outdata);
+ bw.Flush();
+ bw.Close();
+
+ #endregion
+
+ Console.WriteLine("RLE decompressed " + filein);
+ }
+ #endregion
+
+ #region Huffman
+        /// <summary>
+        /// Decompresses a Huffman-compressed file (tag 0x2N, where N is the data size
+        /// in bits) and saves the result in the given output folder.
+        /// </summary>
+        /// <param name="filename">Path of the compressed input file.</param>
+        /// <param name="outflr">Folder to save the decompressed file in.</param>
+        static void DecompressHuffman(String filename, String outflr)
+        {
+            /*
+                Data Header (32bit)
+                    Bit0-3   Data size in bit units (normally 4 or 8)
+                    Bit4-7   Compressed type (must be 2 for Huffman)
+                    Bit8-31  24bit size of decompressed data in bytes
+                Tree Size (8bit)
+                    Bit0-7   Size of Tree Table/2-1 (ie. Offset to Compressed Bitstream)
+                Tree Table (list of 8bit nodes, starting with the root node)
+                    Root Node and Non-Data-Child Nodes are:
+                    Bit0-5   Offset to next child node,
+                            Next child node0 is at (CurrentAddr AND NOT 1)+Offset*2+2
+                            Next child node1 is at (CurrentAddr AND NOT 1)+Offset*2+2+1
+                    Bit6     Node1 End Flag (1=Next child node is data)
+                    Bit7     Node0 End Flag (1=Next child node is data)
+                    Data nodes are (when End Flag was set in parent node):
+                    Bit0-7   Data (upper bits should be zero if Data Size is less than 8)
+                Compressed Bitstream (stored in units of 32bits)
+                    Bit0-31  Node Bits (Bit31=First Bit)  (0=Node0, 1=Node1)
+            */
+
+            BinaryReader br = new BinaryReader(File.OpenRead(filename));
+
+            byte firstByte = br.ReadByte();
+
+            // low nibble: data size in bits; high nibble: compression-type tag
+            int dataSize = firstByte & 0x0F;
+
+            if ((firstByte & 0xF0) != HUFF_TAG)
+                throw new InvalidDataException(String.Format("Invalid huffman comressed file; invalid tag {0:x}", firstByte));
+
+            if (dataSize != 8 && dataSize != 4)
+                throw new InvalidDataException(String.Format("Unhandled dataSize {0:x}", dataSize));
+
+            // 24-bit little-endian decompressed size
+            int decomp_size = 0;
+            for (int i = 0; i < 3; i++)
+            {
+                decomp_size |= br.ReadByte() << (i * 8);
+            }
+
+            byte treeSize = br.ReadByte();
+            // the tree table ends (and the coded bitstream begins) at this input offset
+            HuffTreeNode.maxInpos = 4 + (treeSize + 1) * 2;
+
+            HuffTreeNode rootNode = new HuffTreeNode();
+            rootNode.parseData(br);
+
+            br.BaseStream.Position = 4 + (treeSize + 1) * 2; // go to start of coded bitstream.
+            // read the complete bitstream as 32-bit units
+            uint[] indata = new uint[(br.BaseStream.Length - br.BaseStream.Position) / 4];
+            for (int i = 0; i < indata.Length; i++)
+                indata[i] = br.ReadUInt32();
+
+            long curr_size = 0;
+            // with 4-bit data, each decompressed byte is built from two tree values
+            decomp_size *= dataSize == 8 ? 1 : 2;
+            byte[] outdata = new byte[decomp_size];
+
+            int idx = -1;
+            string codestr = "";
+            LinkedList code = new LinkedList();
+            int value;
+
+            // BUGFIX: a leftover debug override 'decomp_size = 0x100;' used to be here,
+            // truncating every output to 0x100 values; it has been removed, along with
+            // per-uint debug console output.
+            while (curr_size < decomp_size)
+            {
+                try
+                {
+                    codestr += uint_to_bits(indata[++idx]);
+                }
+                catch (IndexOutOfRangeException e)
+                {
+                    throw new IndexOutOfRangeException("not enough data.", e);
+                }
+                while (codestr.Length > 0)
+                {
+                    code.AddFirst(byte.Parse(codestr[0] + ""));
+                    codestr = codestr.Remove(0, 1);
+                    if (rootNode.getValue(code.Last, out value))
+                    {
+                        try
+                        {
+                            outdata[curr_size++] = (byte)value;
+                        }
+                        catch (IndexOutOfRangeException ex)
+                        {
+                            // trailing 0-bits are bitstream padding; anything else is real excess data
+                            if (code.First.Value != 0)
+                                throw ex;
+                        }
+                        code.Clear();
+                    }
+                }
+            }
+
+            br.Close();
+
+            byte[] realout;
+            if (dataSize == 4)
+            {
+                // pack two 4-bit values into one output byte
+                realout = new byte[decomp_size / 2];
+                for (int i = 0; i < decomp_size / 2; i++)
+                {
+                    if ((outdata[i * 2] & 0xF0) > 0
+                        || (outdata[i * 2 + 1] & 0xF0) > 0)
+                        throw new Exception("first 4 bits of data should be 0 if dataSize = 4");
+                    realout[i] = (byte)((outdata[i * 2] << 4) | outdata[i * 2 + 1]);
+                }
+            }
+            else
+            {
+                realout = outdata;
+            }
+
+            #region save
+            // guess an extension from the first 4 bytes of the output
+            string ext = "";
+            for (int i = 0; i < 4; i++)
+                if (char.IsLetterOrDigit((char)realout[i]))
+                    ext += (char)realout[i];
+                else
+                    break;
+            if (ext.Length == 0)
+                ext = "dat";
+            ext = "." + ext;
+            filename = filename.Replace("\\", "/");
+            outflr = outflr.Replace("\\", "/");
+            string outfname = filename.Substring(filename.LastIndexOf("/") + 1);
+            if (outfname.Contains("."))
+                outfname = outfname.Substring(0, outfname.LastIndexOf('.'));
+
+            if (!outflr.EndsWith("/"))
+                outflr += "/";
+            while (File.Exists(outflr + outfname + ext))
+                outfname += "_";
+
+            BinaryWriter bw = new BinaryWriter(new FileStream(outflr + outfname + ext, FileMode.CreateNew));
+            bw.Write(realout);
+            bw.Flush();
+            bw.Close();
+
+            #endregion
+
+            Console.WriteLine("Huffman decompressed {0:s}", filename);
+        }
+ #endregion
+
+ #region None
+        /// <summary>
+        /// "Decompresses" a NONE-compressed file (tag 0x00): after the 4-byte header
+        /// the data is stored uncompressed, so it is simply copied to the output file.
+        /// </summary>
+        /// <param name="filein">Path of the input file.</param>
+        /// <param name="outflr">Folder to save the output file in.</param>
+        private static void DecompressNone(string filein, string outflr)
+        {
+            FileStream fstr = new FileStream(filein, FileMode.Open);
+            if (fstr.Length > int.MaxValue)
+                throw new Exception("Filer larger than 2GB cannot be NONE-compressed files.");
+            BinaryReader br = new BinaryReader(fstr);
+
+            long decomp_size = 0;
+            int i;
+
+            if (br.ReadByte() != NONE_TAG)
+                throw new InvalidDataException(String.Format("File {0:s} is not a valid NONE file, it does not have the NONE-tag as first byte", filein));
+            // 24-bit little-endian size of the stored data
+            for (i = 0; i < 3; i++)
+                decomp_size += br.ReadByte() << (i * 8);
+            if (decomp_size != fstr.Length - 0x04)
+                // BUGFIX: the format string used to be thrown without String.Format,
+                // so "{0:s}" appeared literally in the message.
+                throw new InvalidDataException(String.Format("File {0:s} is not a valid NONE file, the decompression size shold be the file size - 4", filein));
+
+            #region save
+            // guess an extension from the first 4 bytes of the data
+            string ext = "";
+            char c;
+            for (i = 0; i < 4; i++)
+                if (char.IsLetterOrDigit(c = (char)br.ReadByte()))
+                    ext += c;
+                else
+                    break;
+            if (ext.Length == 0)
+                ext = "dat";
+            ext = "." + ext;
+            // rewind the bytes just read for the extension guess
+            br.BaseStream.Position -= i == 4 ? 4 : i + 1;
+
+            filein = filein.Replace("\\", "/");
+            outflr = outflr.Replace("\\", "/");
+            string outfname = filein.Substring(filein.LastIndexOf("/") + 1);
+            if (outfname.Contains("."))
+                outfname = outfname.Substring(0, outfname.LastIndexOf('.'));
+
+            if (!outflr.EndsWith("/"))
+                outflr += "/";
+            while (File.Exists(outflr + outfname + ext))
+                outfname += "_";
+
+            BinaryWriter bw = new BinaryWriter(new FileStream(outflr + outfname + ext, FileMode.CreateNew));
+
+            bw.Write(br.ReadBytes((int)decomp_size));
+
+            bw.Flush();
+            bw.Close();
+
+            #endregion
+
+            Console.WriteLine("NONE-decompressed {0:s}", filein);
+        }
+ #endregion
+
+ #region tag 0x10 LZ77
+        /// <summary>
+        /// Decompresses a file in the LZ-0x10 (LZ77) format and saves the result in the
+        /// given output folder, guessing an extension from the first output bytes.
+        /// </summary>
+        /// <param name="filein">Path of the compressed input file.</param>
+        /// <param name="outflr">Folder to save the decompressed file in.</param>
+        static void DecompressLZ77(string filein, string outflr)
+        {
+            /* Data header (32bit)
+                  Bit 0-3   Reserved
+                  Bit 4-7   Compressed type (must be 1 for LZ77)
+                  Bit 8-31  Size of decompressed data
+               Repeat below. Each Flag Byte followed by eight Blocks.
+               Flag data (8bit)
+                  Bit 0-7   Type Flags for next 8 Blocks, MSB first
+               Block Type 0 - Uncompressed - Copy 1 Byte from Source to Dest
+                  Bit 0-7   One data byte to be copied to dest
+               Block Type 1 - Compressed - Copy N+3 Bytes from Dest-Disp-1 to Dest
+                  Bit 0-3   Disp MSBs
+                  Bit 4-7   Number of bytes to copy (minus 3)
+                  Bit 8-15  Disp LSBs
+            */
+            FileStream fstr = new FileStream(filein, FileMode.Open);
+            if (fstr.Length > int.MaxValue)
+                throw new Exception("Filer larger than 2GB cannot be LZ-0x10-compressed files.");
+            BinaryReader br = new BinaryReader(fstr);
+
+            long decomp_size = 0, curr_size = 0;
+            int flags, i, j, disp, n;
+            bool flag;
+            byte b;
+            long cdest;
+
+            if (br.ReadByte() != LZ10_TAG)
+                throw new InvalidDataException(String.Format("File {0:s} is not a valid LZ-0x10 file", filein));
+            // 24-bit little-endian decompressed size
+            for (i = 0; i < 3; i++)
+                decomp_size += br.ReadByte() << (i * 8);
+            if (decomp_size > MAX_OUTSIZE)
+                throw new Exception(String.Format("{0:s} will be larger than 0x{1:x} (0x{2:x}) and will not be decompressed.", filein, MAX_OUTSIZE, decomp_size));
+            else if (decomp_size == 0)
+                // a stored size of 0 means the real size follows as an extra 32-bit value
+                for (i = 0; i < 4; i++)
+                    decomp_size += br.ReadByte() << (i * 8);
+            if (decomp_size > MAX_OUTSIZE << 8)
+                throw new Exception(String.Format("{0:s} will be larger than 0x{1:x} (0x{2:x}) and will not be decompressed.", filein, MAX_OUTSIZE, decomp_size));
+
+            if (showAlways)
+                Console.WriteLine("Decompressing {0:s}. (outsize: 0x{1:x})", filein, decomp_size);
+
+            #region decompress
+
+            byte[] outdata = new byte[decomp_size];
+
+            while (curr_size < decomp_size)
+            {
+                // each flag byte describes the next 8 blocks, MSB first
+                try { flags = br.ReadByte(); }
+                catch (EndOfStreamException) { break; }
+                for (i = 0; i < 8; i++)
+                {
+                    flag = (flags & (0x80 >> i)) > 0;
+                    if (flag)
+                    {
+                        // compressed block: copy n bytes starting at curr_size - disp - 1
+                        disp = 0;
+                        try { b = br.ReadByte(); }
+                        catch (EndOfStreamException) { throw new Exception("Incomplete data"); }
+                        n = b >> 4;
+                        disp = (b & 0x0F) << 8;
+                        try { disp |= br.ReadByte(); }
+                        catch (EndOfStreamException) { throw new Exception("Incomplete data"); }
+                        n += 3;
+                        cdest = curr_size;
+                        //Console.WriteLine("disp: 0x{0:x}", disp);
+                        if (disp > curr_size)
+                            throw new Exception("Cannot go back more than already written");
+                        // byte-by-byte copy: source and destination may overlap on purpose
+                        for (j = 0; j < n; j++)
+                            outdata[curr_size++] = outdata[cdest - disp - 1 + j];
+                        //curr_size += len;
+                        if (curr_size > decomp_size)
+                        {
+                            //throw new Exception(String.Format("File {0:s} is not a valid LZ77 file; actual output size > output size in header", filein));
+                            //Console.WriteLine(String.Format("File {0:s} is not a valid LZ77 file; actual output size > output size in header; {1:x} > {2:x}.", filein, curr_size, decomp_size));
+                            break;
+                        }
+                    }
+                    else
+                    {
+                        // uncompressed block: copy one literal byte
+                        try { b = br.ReadByte(); }
+                        catch (EndOfStreamException) { break;}// throw new Exception("Incomplete data"); }
+                        // deliberately best-effort: a trailing zero past the end is padding
+                        try { outdata[curr_size++] = b; }
+                        catch (IndexOutOfRangeException) { if (b == 0) break; }
+                        //curr_size++;
+                        if (curr_size > decomp_size)
+                        {
+                            //throw new Exception(String.Format("File {0:s} is not a valid LZ77 file; actual output size > output size in header", filein));
+                            //Console.WriteLine(String.Format("File {0:s} is not a valid LZ77 file; actual output size > output size in header; {1:x} > {2:x}", filein, curr_size, decomp_size));
+                            break;
+                        }
+                    }
+                }
+
+            }
+
+            try
+            {
+                // any remaining input bytes should be zero padding
+                while (br.ReadByte() == 0) { } // if we read a non-zero, print that there is still some data
+                Console.WriteLine("Too many data in file; current INPOS = {0:x}", br.BaseStream.Position - 1);
+            }
+            catch (EndOfStreamException) { }
+
+            #endregion
+
+            #region save
+            // guess an extension from the first 4 bytes of the output
+            string ext = "";
+            for (i = 0; i < 4; i++)
+                if (char.IsLetterOrDigit((char)outdata[i]))
+                    ext += (char)outdata[i];
+                else
+                    break;
+            if (ext.Length == 0)
+                ext = "dat";
+            ext = "." + ext;
+            filein = filein.Replace("\\", "/");
+            outflr = outflr.Replace("\\", "/");
+            string outfname = filein.Substring(filein.LastIndexOf("/") + 1);
+            if (outfname.Contains("."))
+                outfname = outfname.Substring(0, outfname.LastIndexOf('.'));
+
+            if (!outflr.EndsWith("/"))
+                outflr += "/";
+            while (File.Exists(outflr + outfname + ext))
+                outfname += "_";
+
+            BinaryWriter bw = new BinaryWriter(new FileStream(outflr + outfname + ext, FileMode.CreateNew));
+            bw.Write(outdata);
+            bw.Flush();
+            bw.Close();
+
+            #endregion
+
+            Console.WriteLine("LZ-0x10 Decompressed " + filein);
+
+        }
+ #endregion
+
+ #region tag 0x11 LZSS
+        /// <summary>
+        /// Decompresses a file in the LZ-0x11 (LZSS) format and saves the result in the
+        /// given output folder, guessing an extension from the first output bytes.
+        /// </summary>
+        /// <param name="filein">Path of the compressed input file.</param>
+        /// <param name="outflr">Folder to save the decompressed file in.</param>
+        static void Decompress11LZS(string filein, string outflr)
+        {
+            /* Data header (32bit)
+                  Bit 0-3   Reserved
+                  Bit 4-7   Compressed type (must be 1 for LZ77)
+                  Bit 8-31  Size of decompressed data. if 0, the next 4 bytes are decompressed length
+               Repeat below. Each Flag Byte followed by eight Blocks.
+               Flag data (8bit)
+                  Bit 0-7   Type Flags for next 8 Blocks, MSB first
+               Block Type 0 - Uncompressed - Copy 1 Byte from Source to Dest
+                  Bit 0-7   One data byte to be copied to dest
+               Block Type 1 - Compressed - Copy LEN Bytes from Dest-Disp-1 to Dest
+                  If Reserved is 0: - Default
+                     Bit 0-3   Disp MSBs
+                     Bit 4-7   LEN - 3
+                     Bit 8-15  Disp LSBs
+                  If Reserved is 1: - Higher compression rates for files with (lots of) long repetitions
+                     Bit 4-7   Indicator
+                     If Indicator > 1:
+                        Bit 0-3    Disp MSBs
+                        Bit 4-7    LEN - 1 (same bits as Indicator)
+                        Bit 8-15   Disp LSBs
+                     If Indicator is 1: A(B CD E)(F GH)
+                        Bit 0-3     (LEN - 0x111) MSBs
+                        Bit 4-7     Indicator; unused
+                        Bit 8-15    (LEN- 0x111) 'middle'-SBs
+                        Bit 16-19   Disp MSBs
+                        Bit 20-23   (LEN - 0x111) LSBs
+                        Bit 24-31   Disp LSBs
+                     If Indicator is 0:
+                        Bit 0-3     (LEN - 0x11) MSBs
+                        Bit 4-7     Indicator; unused
+                        Bit 8-11    Disp MSBs
+                        Bit 12-15   (LEN - 0x11) LSBs
+                        Bit 16-23   Disp LSBs
+             */
+            FileStream fstr = new FileStream(filein, FileMode.Open);
+            if (fstr.Length > int.MaxValue)
+                throw new Exception("Filer larger than 2GB cannot be LZ-0x11-compressed files.");
+            BinaryReader br = new BinaryReader(fstr);
+
+            int decomp_size = 0, curr_size = 0;
+            int i, j, disp, len;
+            bool flag;
+            byte b1, bt, b2, b3, flags;
+            int cdest;
+
+            // minimum copy length for the default (indicator > 1) block type
+            int threshold = 1;
+
+            if (br.ReadByte() != LZ11_TAG)
+                throw new InvalidDataException(String.Format("File {0:s} is not a valid LZ-0x11 file", filein));
+            // 24-bit little-endian decompressed size
+            for (i = 0; i < 3; i++)
+                decomp_size += br.ReadByte() << (i * 8);
+            if (decomp_size > MAX_OUTSIZE)
+                throw new Exception(String.Format("{0:s} will be larger than 0x{1:x} (0x{2:x}) and will not be decompressed. (1)", filein, MAX_OUTSIZE, decomp_size));
+            else if (decomp_size == 0)
+                // a stored size of 0 means the real size follows as an extra 32-bit value
+                for (i = 0; i < 4; i++)
+                    decomp_size += br.ReadByte() << (i * 8);
+            if (decomp_size > MAX_OUTSIZE << 8)
+                throw new Exception(String.Format("{0:s} will be larger than 0x{1:x} (0x{2:x}) and will not be decompressed. (2)", filein, MAX_OUTSIZE, decomp_size));
+
+            if (showAlways)
+                Console.WriteLine("Decompressing {0:s}. (outsize: 0x{1:x})", filein, decomp_size);
+
+
+            byte[] outdata = new byte[decomp_size];
+
+
+            while (curr_size < decomp_size)
+            {
+                // each flag byte describes the next 8 blocks, MSB first
+                try { flags = br.ReadByte(); }
+                catch (EndOfStreamException) { break; }
+
+                for (i = 0; i < 8 && curr_size < decomp_size; i++)
+                {
+                    flag = (flags & (0x80 >> i)) > 0;
+                    if (flag)
+                    {
+                        try { b1 = br.ReadByte(); }
+                        catch (EndOfStreamException) { throw new Exception("Incomplete data"); }
+
+                        // the top nibble of the first byte selects the block layout
+                        switch (b1 >> 4)
+                        {
+                            #region case 0
+                            case 0:
+                                // ab cd ef
+                                // =>
+                                // len = abc + 0x11 = bc + 0x11
+                                // disp = def
+
+                                len = b1 << 4;
+                                try { bt = br.ReadByte(); }
+                                catch (EndOfStreamException) { throw new Exception("Incomplete data"); }
+                                len |= bt >> 4;
+                                len += 0x11;
+
+                                disp = (bt & 0x0F) << 8;
+                                try { b2 = br.ReadByte(); }
+                                catch (EndOfStreamException) { throw new Exception("Incomplete data"); }
+                                disp |= b2;
+                                break;
+                            #endregion
+
+                            #region case 1
+                            case 1:
+                                // ab cd ef gh
+                                // =>
+                                // len = bcde + 0x111
+                                // disp = fgh
+                                // 10 04 92 3F => disp = 0x23F, len = 0x149 + 0x11 = 0x15A
+
+                                try { bt = br.ReadByte(); b2 = br.ReadByte(); b3 = br.ReadByte(); }
+                                catch (EndOfStreamException) { throw new Exception("Incomplete data"); }
+
+                                len = (b1 & 0xF) << 12; // len = b000
+                                len |= bt << 4; // len = bcd0
+                                len |= (b2 >> 4); // len = bcde
+                                len += 0x111; // len = bcde + 0x111
+                                disp = (b2 & 0x0F) << 8; // disp = f
+                                disp |= b3; // disp = fgh
+                                break;
+                            #endregion
+
+                            #region other
+                            default:
+                                // ab cd
+                                // =>
+                                // len = a + threshold = a + 1
+                                // disp = bcd
+
+                                len = (b1 >> 4) + threshold;
+
+                                disp = (b1 & 0x0F) << 8;
+                                try { b2 = br.ReadByte(); }
+                                catch (EndOfStreamException) { throw new Exception("Incomplete data"); }
+                                disp |= b2;
+                                break;
+                            #endregion
+                        }
+
+                        if (disp > curr_size)
+                            throw new Exception("Cannot go back more than already written");
+
+                        cdest = curr_size;
+
+                        // byte-by-byte copy: source and destination may overlap on purpose
+                        for (j = 0; j < len && curr_size < decomp_size; j++)
+                            outdata[curr_size++] = outdata[cdest - disp - 1 + j];
+
+                        if (curr_size > decomp_size)
+                        {
+                            //throw new Exception(String.Format("File {0:s} is not a valid LZ77 file; actual output size > output size in header", filein));
+                            //Console.WriteLine(String.Format("File {0:s} is not a valid LZ77 file; actual output size > output size in header; {1:x} > {2:x}.", filein, curr_size, decomp_size));
+                            break;
+                        }
+                    }
+                    else
+                    {
+                        // uncompressed block: copy one literal byte
+                        try { outdata[curr_size++] = br.ReadByte(); }
+                        catch (EndOfStreamException) { break; }// throw new Exception("Incomplete data"); }
+
+                        if (curr_size > decomp_size)
+                        {
+                            //throw new Exception(String.Format("File {0:s} is not a valid LZ77 file; actual output size > output size in header", filein));
+                            //Console.WriteLine(String.Format("File {0:s} is not a valid LZ77 file; actual output size > output size in header; {1:x} > {2:x}", filein, curr_size, decomp_size));
+                            break;
+                        }
+                    }
+                }
+
+            }
+
+            try
+            {
+                // any remaining input bytes should be zero padding
+                while (br.ReadByte() == 0) { } // if we read a non-zero, print that there is still some data
+                Console.WriteLine("Too much data in file; current INPOS = {0:x}", br.BaseStream.Position - 1);
+            }
+            catch (EndOfStreamException) { }
+
+            #region save
+            // guess an extension from the first 4 bytes of the output
+            string ext = "";
+            for (i = 0; i < 4; i++)
+                if (char.IsLetterOrDigit((char)outdata[i]))
+                    ext += (char)outdata[i];
+                else
+                    break;
+            if (ext.Length == 0)
+                ext = "dat";
+            ext = "." + ext;
+            filein = filein.Replace("\\", "/");
+            outflr = outflr.Replace("\\", "/");
+            string outfname = filein.Substring(filein.LastIndexOf("/") + 1);
+            if (outfname.Contains("."))
+                outfname = outfname.Substring(0, outfname.LastIndexOf('.'));
+
+            if (!outflr.EndsWith("/"))
+                outflr += "/";
+            while (File.Exists(outflr + outfname + ext))
+                outfname += "_";/**/
+
+            BinaryWriter bw = new BinaryWriter(new FileStream(outflr + outfname + ext, FileMode.Create));
+            bw.Write(outdata);
+
+            bw.Flush();
+            bw.Close();
+
+            #endregion
+
+            Console.WriteLine("LZ-0x11 Decompressed " + filein);
+
+        }
+ #endregion
+
+ #region tag 0x40 LZ
+        /// <summary>
+        /// Decompresses a file in the LZ-0x40 format and saves the result in the given
+        /// output folder, guessing an extension from the first output bytes.
+        /// </summary>
+        /// <param name="filein">Path of the compressed input file.</param>
+        /// <param name="outflr">Folder to save the decompressed file in.</param>
+        static void DecompressLZ40(string filein, string outflr)
+        {
+            // no NDSTEK-like specification for this one; I seem to not be able to get those right.
+            /*
+             * byte tag; // 0x40
+             * byte[3] decompressedSize;
+             * the rest is the data;
+             *
+             * for each chunk:
+             *      - first byte determines which blocks are compressed
+             *           - block i is compressed iff:
+             *               - the i'th MSB is the last 1-bit in the byte
+             *               - OR the i'th MSB is a 0-bit, not directly followed by other 0-bits.
+             *               - note that there will never be more than one 0-bit before any 1-bit in this byte
+             *                  (look at the corresponding code, it may clarify this a bit more)
+             *      - then come 8 blocks:
+             *           - a non-compressed block is simply one single byte
+             *           - a compressed block can have 3 sizes:
+             *               - A0 CD EF
+             *                   -> Length = EF + 0x10, Disp = CDA
+             *               - A1 CD EF GH
+             *                   -> Length = GHEF + 0x110, Disp = CDA
+             *               - AB CD  (B > 1)
+             *                   -> Length = B, Disp = CDA
+             *               Copy bytes from Dest- to Dest (with similar to the NDSTEK specs)
+             */
+
+
+            FileStream fstr = new FileStream(filein, FileMode.Open);
+            if (fstr.Length > int.MaxValue)
+                throw new Exception("Filer larger than 2GB cannot be LZSS-compressed files.");
+            BinaryReader br = new BinaryReader(fstr);
+
+            int decomp_size = 0, curr_size = 0;
+
+            if (br.ReadByte() != LZ40_TAG)
+                throw new InvalidDataException(String.Format("File {0:s} is not a valid LZSS-11 file", filein));
+            // 24-bit little-endian decompressed size
+            for (int i = 0; i < 3; i++)
+                decomp_size += br.ReadByte() << (i * 8);
+            if (decomp_size > MAX_OUTSIZE)
+                throw new Exception(String.Format("{0:s} will be larger than 0x{1:x} (0x{2:x}) and will not be decompressed. (1)", filein, MAX_OUTSIZE, decomp_size));
+            else if (decomp_size == 0)
+                // a stored size of 0 means the real size follows as an extra 32-bit value
+                for (int i = 0; i < 4; i++)
+                    decomp_size += br.ReadByte() << (i * 8);
+            if (decomp_size > MAX_OUTSIZE << 8)
+                throw new Exception(String.Format("{0:s} will be larger than 0x{1:x} (0x{2:x}) and will not be decompressed. (2)", filein, MAX_OUTSIZE, decomp_size));
+
+            if (showAlways)
+                Console.WriteLine("Decompressing {0:s}. (outsize: 0x{1:x})", filein, decomp_size);
+
+
+            byte[] outdata = new byte[decomp_size];
+
+            while (curr_size < decomp_size)
+            {
+                int flag;
+                try { flag = br.ReadByte(); }
+                catch (EndOfStreamException)
+                {
+                    Console.WriteLine("Not enough data");
+                    break;
+                }
+                int flagB = flag;
+                bool[] compFlags = new bool[8];
+                // NOTE(review): fbits appears to be unused scaffolding beyond these
+                // assignments — confirm before removing.
+                bool[] fbits = new bool[11];
+                fbits[0] = true;
+                fbits[9] = false;
+                fbits[10] = false;
+
+                // determine which blocks are compressed
+                int b = 0;
+                while (flag > 0)
+                {
+                    bool bit = (flag & 0x80) > 0;
+                    flag = (flag & 0x7F) << 1;
+                    // compressed iff this was the last 1-bit, or a lone 0-bit (see format notes)
+                    compFlags[b++] = (flag == 0) || !bit;
+                }
+
+                /*
+                Console.WriteLine("Flag: 0x{0:X2}", flagB);
+                Console.Write("-> ( ");
+                for (int i = 0; i < 8; i++)
+                    Console.Write(compFlags[i] ? "1," : "0,");
+                Console.WriteLine(")");/**/
+
+                for (int i = 0; i < 8 && curr_size < decomp_size; i++)
+                {
+                    if (compFlags[i])
+                    {
+                        ushort compressed = br.ReadUInt16();
+                        // ABCD (or CD AB if read byte-by-byte)
+                        // -> D is length
+                        // -> ABC is disp
+                        int len = compressed & 0x000F;
+                        int disp = compressed >> 4;
+
+                        // if D == 0, actual format is:
+                        // CD AB EF
+                        // -> DEF is length - 0x10
+                        // -> ABC is disp
+
+                        // if D == 1, actual format is:
+                        // CD AB EF GH
+                        // -> GHEF is length - 0x110
+                        // -> ABC is disp
+                        if (len == 0)
+                            len = br.ReadByte() + 0x10;
+                        else if (len == 1)
+                            len = br.ReadUInt16() + 0x110;
+
+                        if (disp > curr_size)
+                            throw new Exception("Cannot go back more than already written "
+                                + "(compressed block=0x" + compressed.ToString("X4") + ")\n"
+                                + "INPOS = 0x" + (br.BaseStream.Position - 2).ToString("X4"));
+
+                        // byte-by-byte copy from curr_size - disp; may overlap on purpose
+                        for (int j = 0; j < len; j++)
+                        {
+                            outdata[curr_size + j] = outdata[curr_size - disp + j];
+                        }
+                        curr_size += len;
+                    }
+                    else
+                    {
+                        // uncompressed block: copy one literal byte
+                        outdata[curr_size++] = br.ReadByte();
+                    }
+                }
+            }
+
+            try
+            {
+                byte b;
+                while ((b = br.ReadByte()) == 0
+                    || b == 0x80) { }
+                // if we read a non-zero up to the end of the file, print that there is still some data
+                // (0x40 compression seems to add 80 00 00 sometimes, so also ignore 0x80-bytes)
+                Console.WriteLine("Too much data in file; current INPOS = {0:x}", br.BaseStream.Position - 1);
+            }
+            catch (EndOfStreamException) { }
+
+            #region save
+            // guess an extension from the first 4 bytes of the output
+            string ext = "";
+            for (int i = 0; i < 4; i++)
+                if (char.IsLetterOrDigit((char)outdata[i]))
+                    ext += (char)outdata[i];
+                else
+                    break;
+            if (ext.Length == 0)
+                ext = "dat";
+            ext = "." + ext;
+            filein = filein.Replace("\\", "/");
+            outflr = outflr.Replace("\\", "/");
+            string outfname = filein.Substring(filein.LastIndexOf("/") + 1);
+            if (outfname.Contains("."))
+                outfname = outfname.Substring(0, outfname.LastIndexOf('.'));
+
+            if (!outflr.EndsWith("/"))
+                outflr += "/";
+            while (File.Exists(outflr + outfname + ext))
+                outfname += "_";/**/
+
+            BinaryWriter bw = new BinaryWriter(new FileStream(outflr + outfname + ext, FileMode.Create));
+            bw.Write(outdata);
+
+            bw.Flush();
+            bw.Close();
+            #endregion
+
+            Console.WriteLine("LZ-0x40-decompressed " + filein);
+        }
+ #endregion
+
+ #region LZ Overlay
+        /// <summary>
+        /// Decompresses an overlay-LZ (backwards LZ-0x10) file, as used for NDS overlay
+        /// files and arm9.bin, and writes the result with a .ovl extension.
+        /// </summary>
+        /// <param name="filein">Path of the compressed input file.</param>
+        /// <param name="outflr">Folder to save the decompressed file in.</param>
+        private static void DecompressLZOverlay(string filein, string outflr)
+        {
+            // Overlay LZ compression is basically just LZ-0x10 compression.
+            // however the order if reading is reversed: the compression starts at the end of the file.
+            // Assuming we start reading at the end towards the beginning, the format is:
+            /*
+             * u32 extraSize; // decompressed data size = file length (including header) + this value
+             * u8 headerSize;
+             * u24 compressedLength; // can be less than file size (w/o header). If so, the rest of the file is uncompressed.
+             * u8[headerSize-8] padding; // 0xFF-s
+             *
+             * 0x10-like-compressed data follows (without the usual 4-byte header).
+             * The only difference is that 2 should be added to the DISP value in compressed blocks
+             * to get the proper value.
+             * the u32 and u24 are read most significant byte first.
+             * if extraSize is 0, there is no headerSize, decompressedLength or padding.
+             * the data starts immediately, and is uncompressed.
+             *
+             * arm9.bin has 3 extra u32 values at the 'start' (ie: end of the file),
+             * which may be ignored. (and are ignored here) These 12 bytes also should not
+             * be included in the computation of the output size.
+             */
+
+            // save the input file in a buffer, since we need to read backwards.
+            // reverse the array once we're done reading
+            byte[] inbuffer = new byte[0];
+            using (BinaryReader reader = new BinaryReader(File.OpenRead(filein)))
+            {
+                if (filein.EndsWith("arm9.bin"))
+                {
+                    // arm9.bin has 0xC extra bytes we don't need at the end.
+                    // Without those the format is the same as with overlay files.
+                    inbuffer = new byte[reader.BaseStream.Length - 0xC];
+                }
+                else
+                    inbuffer = new byte[reader.BaseStream.Length];
+                reader.Read(inbuffer, 0, inbuffer.Length);
+            }
+            Array.Reverse(inbuffer);
+
+            // decompress the input. this results in an output buffer that is reversed,
+            // so reverse that after decompression as well.
+            byte[] outbuffer = new byte[0];
+            using (BinaryReader reader = new BinaryReader(new MemoryStream(inbuffer)))
+            {
+                // big-endian u32 in the original file; after the reversal it is read
+                // MSB-first from the buffer start
+                int extraSize = (reader.ReadByte() << 24)
+                                | (reader.ReadByte() << 16)
+                                | (reader.ReadByte() << 8)
+                                | (reader.ReadByte());
+
+                if (extraSize == 0)
+                {
+                    outbuffer = new byte[inbuffer.Length - 4];
+                    // if the extra size if 0, there is no overlay compression.
+                    reader.Read(outbuffer, 0, outbuffer.Length);
+                }
+                else
+                {
+                    byte headerLength = reader.ReadByte();
+                    int compressedSize = (reader.ReadByte() << 16)
+                                        | (reader.ReadByte() << 8)
+                                        | reader.ReadByte();
+                    // skip the padding
+                    reader.BaseStream.Position = headerLength;
+
+                    outbuffer = new byte[inbuffer.Length + extraSize];
+
+                    // decompress the compressed part
+                    #region LZ-0x10-like decompression
+                    int curr_size = 0;
+                    int decomp_size = compressedSize + extraSize;
+                    int inpos = 0;
+                    byte b;
+                    int n, disp, j, cdest;
+                    while (inpos < compressedSize && curr_size < decomp_size)
+                    {
+                        // each flag byte describes the next 8 blocks, MSB first
+                        byte flags = reader.ReadByte();
+                        for (int i = 0; i < 8; i++)
+                        {
+                            bool flag = (flags & (0x80 >> i)) > 0;
+                            if (flag)
+                            {
+                                // compressed block, as in LZ-0x10 but with an adjusted DISP
+                                disp = 0;
+                                try { b = reader.ReadByte(); }
+                                catch (EndOfStreamException) { throw new Exception("Incomplete data"); }
+                                n = b >> 4;
+                                disp = (b & 0x0F) << 8;
+                                try { disp |= reader.ReadByte(); }
+                                catch (EndOfStreamException) { throw new Exception("Incomplete data"); }
+                                n += 3;
+                                cdest = curr_size;
+
+                                inpos += 2;
+
+                                disp += 3;
+
+                                if (disp > curr_size)
+                                {
+                                    //throw new Exception("Cannot go back more than already written");
+                                    Console.WriteLine("DISP is too large (0x{0:X}, curr_size=0x{1:X}, length=0x{2:X}); using 1 instead.", disp, curr_size, n);
+                                    //disp %= curr_size;
+                                    // HACK. this seems to produce valid files, but isn't the most elegant solution.
+                                    // although this _could_ be the actual way to use a disp of 2 in this format,
+                                    // as otherwise the minimum would be 3 (and 0 is undefined, and 1 is less useful).
+                                    disp = 2;
+                                }
+                                for (j = 0; j < n; j++)
+                                    outbuffer[curr_size++] = outbuffer[cdest - disp + j];
+
+                                if (curr_size > decomp_size || inpos >= compressedSize)
+                                {
+                                    break;
+                                }
+                            }
+                            else
+                            {
+                                // uncompressed block: copy one literal byte
+                                try { b = reader.ReadByte(); }
+                                catch (EndOfStreamException) { break; }
+                                try { outbuffer[curr_size++] = b; }
+                                catch (IndexOutOfRangeException) { if (b == 0) break; }
+
+                                inpos += 1;
+
+                                if (curr_size > decomp_size || inpos >= compressedSize)
+                                {
+                                    break;
+                                }
+                            }
+                        }
+                    }
+                }
+
+                #endregion
+
+                // if there is any uncompressed part, copy that to the buffer as well
+                int decompressedLength = (int)(reader.BaseStream.Length - reader.BaseStream.Position);
+                //Console.WriteLine("outlen-curr_size:0x{0:X}", outbuffer.Length - curr_size);
+                //Console.WriteLine("reader.len-reader.pos:0x{0:X}", decompressedLength);
+                if (decompressedLength > 0)
+                {
+                    reader.Read(outbuffer, outbuffer.Length - decompressedLength, decompressedLength);
+                }
+            }
+            Array.Reverse(outbuffer);
+
+            // write the output to a file. Replace the .bin extension with .ovl.
+            string infname = Path.GetFileName(filein);
+            string outfname = infname.Substring(0, infname.Length - 3) + "ovl";
+            string outfile = Path.Combine(outflr, outfname);
+
+            using (BinaryWriter writer = new BinaryWriter(File.Create(outfile)))
+            {
+                writer.Write(outbuffer);
+            }
+
+            Console.WriteLine("LZ-Overlay compressed " + filein);
+        }
+ #endregion
+
+ #region helper methods
+        /// <summary>
+        /// Returns the 8-character binary representation of the given byte, MSB first.
+        /// </summary>
+        private static string byte_to_bits(byte b)
+        {
+            return Convert.ToString(b, 2).PadLeft(8, '0');
+        }
+        /// <summary>
+        /// Returns the 32-character binary representation of the given uint, MSB first.
+        /// </summary>
+        private static string uint_to_bits(uint u)
+        {
+            string bits = "";
+            for (int shift = 24; shift >= 0; shift -= 8)
+                bits += byte_to_bits((byte)(u >> shift));
+            return bits;
+        }
+
+        /// <summary>
+        /// Reads the next byte from the given reader without consuming it:
+        /// the stream position is restored before returning.
+        /// </summary>
+        private static byte peekByte(BinaryReader br)
+        {
+            byte value = br.ReadByte();
+            br.BaseStream.Seek(-1, SeekOrigin.Current);
+            return value;
+        }
+
+        /// <summary>
+        /// Returns the given path with every backslash replaced by a forward slash.
+        /// </summary>
+        private static string makeSlashes(string path)
+        {
+            // equivalent to the old char-by-char StringBuilder loop, but O(n) and idiomatic
+            return path.Replace('\\', '/');
+        }
+ #endregion
+
+ }
+
+    /// <summary>
+    /// A node in a Huffman decoding tree, parsed from the GBA/NDS tree-table format.
+    /// Internal nodes have <c>data == -1</c> and two children; leaves carry a data byte.
+    /// </summary>
+    class HuffTreeNode
+    {
+        // input position at which the tree table ends; set before calling parseData.
+        internal static int maxInpos = 0;
+        internal HuffTreeNode node0, node1;
+        internal int data = -1; // [-1,0xFF]; -1 marks an internal (non-data) node
+        /// <summary>
+        /// To get a value, provide the last node of a list of bytes &lt; 2.
+        /// the list will be read from back to front (0 selects node0, 1 selects node1).
+        /// Returns true iff the walk ends exactly on a data leaf.
+        /// </summary>
+        internal bool getValue(LinkedListNode code, out int value)
+        {
+            value = data;
+            if (code == null)
+                return node0 == null && node1 == null && data >= 0;
+
+            if (code.Value > 1)
+                throw new Exception(String.Format("the list should be a list of bytes < 2. got:{0:g}", code.Value));
+
+            byte c = code.Value;
+            bool retVal;
+            HuffTreeNode n = c == 0 ? node0 : node1;
+            retVal = n != null && n.getValue(code.Previous, out value);
+            return retVal;
+        }
+
+        /// <summary>
+        /// Looks up the value for a bit-string code (read front-to-back); -1 if absent.
+        /// </summary>
+        internal int this[string code]
+        {
+            get
+            {
+                LinkedList c = new LinkedList();
+                foreach (char ch in code)
+                    c.AddFirst((byte)ch);
+                int val = 1;
+                return this.getValue(c.Last, out val) ? val : -1;
+            }
+        }
+
+        /// <summary>
+        /// Recursively parses this (sub)tree from the tree table the reader is positioned in;
+        /// stops descending once the position reaches <see cref="maxInpos"/>.
+        /// </summary>
+        internal void parseData(BinaryReader br)
+        {
+            /*
+             * Tree Table (list of 8bit nodes, starting with the root node)
+                Root Node and Non-Data-Child Nodes are:
+                Bit0-5   Offset to next child node,
+                        Next child node0 is at (CurrentAddr AND NOT 1)+Offset*2+2
+                        Next child node1 is at (CurrentAddr AND NOT 1)+Offset*2+2+1
+                Bit6     Node1 End Flag (1=Next child node is data)
+                Bit7     Node0 End Flag (1=Next child node is data)
+                Data nodes are (when End Flag was set in parent node):
+                Bit0-7   Data (upper bits should be zero if Data Size is less than 8)
+             */
+            this.node0 = new HuffTreeNode();
+            this.node1 = new HuffTreeNode();
+            long currPos = br.BaseStream.Position;
+            byte b = br.ReadByte();
+            long offset = b & 0x3F;
+            bool end0 = (b & 0x80) > 0, end1 = (b & 0x40) > 0;
+            // parse data for node0
+            br.BaseStream.Position = (currPos - (currPos & 1)) + offset * 2 + 2;
+            if (br.BaseStream.Position < maxInpos)
+            {
+                if (end0)
+                    node0.data = br.ReadByte();
+                else
+                    node0.parseData(br);
+            }
+            // parse data for node1
+            br.BaseStream.Position = (currPos - (currPos & 1)) + offset * 2 + 2 + 1;
+            if (br.BaseStream.Position < maxInpos)
+            {
+                if (end1)
+                    node1.data = br.ReadByte();
+                else
+                    node1.parseData(br);
+            }
+            // reset position
+            br.BaseStream.Position = currPos;
+        }
+
+        public override string ToString()
+        {
+            if (data < 0 && node0 != null && node1 != null)
+                return "<" + node0.ToString() + ", " + node1.ToString() + ">";
+            else
+                return String.Format("[{0:x}]", data);
+        }
+
+        /// <summary>
+        /// The depth of this (sub)tree: 0 for a data leaf, 1 + deepest child otherwise.
+        /// </summary>
+        internal int Depth
+        {
+            get
+            {
+                // BUGFIX: the condition was inverted ('data < 0' returned 0), which made
+                // internal nodes report depth 0 and dereferenced the null children of leaves.
+                if (data >= 0)
+                    return 0;
+                else
+                    return 1 + Math.Max(node0.Depth, node1.Depth);
+            }
+        }
+    }
+}
diff --git a/CSharp/DSDecmp/Properties/AssemblyInfo.cs b/CSharp/DSDecmp/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..92464d6
--- /dev/null
+++ b/CSharp/DSDecmp/Properties/AssemblyInfo.cs
@@ -0,0 +1,33 @@
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+// General Information about an assembly is controlled through the following
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle("DSDecmp")]
+[assembly: AssemblyDescription("")]
+[assembly: AssemblyConfiguration("")]
+[assembly: AssemblyCompany("")]
+[assembly: AssemblyProduct("DSDecmp")]
+[assembly: AssemblyCopyright("Copyright © 2009")]
+[assembly: AssemblyTrademark("")]
+[assembly: AssemblyCulture("")]
+
+// Setting ComVisible to false makes the types in this assembly not visible
+// to COM components. If you need to access a type in this assembly from
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible(false)]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid("5c936297-7872-4fc0-88f7-c0289d20b297")]
+
+// Version information for an assembly consists of the following four values:
+//
+// Major Version
+// Minor Version
+// Build Number
+// Revision
+//
+[assembly: AssemblyVersion("1.0.0.0")]
+[assembly: AssemblyFileVersion("1.0.0.0")]
diff --git a/CSharp/DSDecmp/TestProgram.cs b/CSharp/DSDecmp/TestProgram.cs
new file mode 100644
index 0000000..a52ed48
--- /dev/null
+++ b/CSharp/DSDecmp/TestProgram.cs
@@ -0,0 +1,55 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+
+namespace DSDecmp
+{
/// <summary>
/// Development scratchpad used to exercise the (de)compression formats by hand;
/// not part of the normal program flow. The body consists of commented-out
/// experiments that were toggled on as needed.
/// </summary>
class TestProgram
{

    /// <summary>
    /// Runs whichever ad-hoc compression/decompression experiments are currently
    /// uncommented below, then waits for a key press so console output stays visible.
    /// </summary>
    /// <param name="args">Unused.</param>
    public static void MainTest(string[] args)
    {
        /*
        new RLE().Compress("tests/rle/testdata.dat", "tests/rle/cmp/testdata.rle.dat");
        new RLE().Decompress("tests/rle/cmp/testdata.rle.dat", "tests/rle/dec/testdata.elr.dat");
        /**/

        //Program.Main1(new string[] { "tests/huff/00.dat", "tests/huff/dec2/" });
        //Console.WriteLine("-----------------------------------------------------");
        //new Huffman().Decompress("tests/huff/00.dat", "tests/huff/dec/00.ffuh.dat");
        /**/
        //new LZ11().Decompress("tests/lz11/game_over_NCGR.cdat", "tests/lz11/dec/game_over.11zl.NCGR");

        //new LZOvl().Decompress("tests/lzovl/overlay_0001.bin", "tests/lzovl/dec/overlay_0001.dat");

        //new LZ10().Decompress("tests/lz10/npc002_LZ.bin", "tests/lz10/dec/npc002.narc");
        //LZ10.LookAhead = true;
        //new LZ10().Compress("tests/lz10/dec/npc002.narc", "tests/lz10/cmp/npc002_d.narc.lz");
        //new LZ10().Decompress("tests/lz10/cmp/npc002_d.narc.lz", "tests/lz10/cmpdec/npc002.narc");

        //LZ11.LookAhead = true;
        //new LZ11().Compress("tests/lz11/dec/game_over.11zl.NCGR", "tests/lz11/cmp/game_over.NCGR2.lz11");
        //new LZ11().Decompress("tests/lz11/cmp/game_over.NCGR2.lz11", "tests/lz11/cmpdec/game_over.NCGR");

        //LZOvl.LookAhead = true;
        //new LZOvl().Compress("tests/lzovl/dec/overlay_0001.dat", "tests/lzovl/cmp/overlay_0001b.bin");
        //new LZOvl().Decompress("tests/lzovl/cmp/overlay_0001b.bin", "tests/lzovl/cmpdec/overlay_0001.dat");

        //Huffman.CompressBlockSize = Huffman.BlockSize.FOURBIT;
        //new Huffman().Compress("tests/huff/dec/00.ffuh.dat", "tests/huff/cmp/00.huff4");
        //new Huffman().Decompress("tests/huff/cmp/00.huff4", "tests/huff/cmpdec/00.dat");
        //new Huffman().Compress("tests/huff/test.dat", "tests/huff/cmp/test.huff");
        //new Huffman().Decompress("tests/huff/cmp/test.huff", "tests/huff/cmpdec/test.dat");

        //new LZOvl().Decompress("tests/lzovl2/overlay_0001.bin", "tests/lzovl2/overlay_0001.dat");

        //new LuminousArc().Decompress("tests/Le/advimg00.imb", "tests/Le/dec/advimg00.imb");

        Console.WriteLine("Success?");
        Console.ReadLine();

    }

}
+}
diff --git a/CSharp/DSDecmp/Utils/IOUtils.cs b/CSharp/DSDecmp/Utils/IOUtils.cs
new file mode 100644
index 0000000..0641b23
--- /dev/null
+++ b/CSharp/DSDecmp/Utils/IOUtils.cs
@@ -0,0 +1,147 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.Reflection;
+using System.IO;
+
+namespace DSDecmp
+{
/// <summary>
/// Class for I/O-related utility methods.
/// </summary>
public static class IOUtils
{

    #region byte[] <-> (u)int
    /// <summary>
    /// Returns a 4-byte unsigned integer as used on the NDS (little-endian) converted
    /// from four bytes at a specified position in a byte array.
    /// </summary>
    /// <param name="buffer">The source of the data.</param>
    /// <param name="offset">The location of the data in the source.</param>
    /// <returns>The indicated 4 bytes converted to uint.</returns>
    public static uint ToNDSu32(byte[] buffer, int offset)
    {
        return (uint)(buffer[offset]
            | (buffer[offset + 1] << 8)
            | (buffer[offset + 2] << 16)
            | (buffer[offset + 3] << 24));
    }

    /// <summary>
    /// Returns a 4-byte signed integer as used on the NDS (little-endian) converted
    /// from four bytes at a specified position in a byte array.
    /// </summary>
    /// <param name="buffer">The source of the data.</param>
    /// <param name="offset">The location of the data in the source.</param>
    /// <returns>The indicated 4 bytes converted to int.</returns>
    public static int ToNDSs32(byte[] buffer, int offset)
    {
        return (int)(buffer[offset]
            | (buffer[offset + 1] << 8)
            | (buffer[offset + 2] << 16)
            | (buffer[offset + 3] << 24));
    }

    /// <summary>
    /// Converts a u32 value into a sequence of bytes that would make ToNDSu32 return
    /// the given input value.
    /// </summary>
    /// <param name="value">The value to convert.</param>
    /// <returns>Four bytes in little-endian order.</returns>
    public static byte[] FromNDSu32(uint value)
    {
        return new byte[] {
            (byte)(value & 0xFF),
            (byte)((value >> 8) & 0xFF),
            (byte)((value >> 16) & 0xFF),
            (byte)((value >> 24) & 0xFF)
        };
    }

    /// <summary>
    /// Returns a 3-byte integer as used in the built-in compression
    /// formats in the DS, converted from three bytes at a specified position in a byte array.
    /// </summary>
    /// <param name="buffer">The source of the data.</param>
    /// <param name="offset">The location of the data in the source.</param>
    /// <returns>The indicated 3 bytes converted to an integer.</returns>
    public static int ToNDSu24(byte[] buffer, int offset)
    {
        return (int)(buffer[offset]
            | (buffer[offset + 1] << 8)
            | (buffer[offset + 2] << 16));
    }
    #endregion

    #region Plugin loading
    /// <summary>
    /// (Attempts to) load compression formats from the given file.
    /// </summary>
    /// <param name="file">The dll file to load.</param>
    /// <param name="printFailures">If formats without a parameterless constructor should get a print.</param>
    /// <returns>A list with an instance of all compression formats found in the given dll file.</returns>
    /// <exception cref="FileNotFoundException">If the given file does not exist.</exception>
    /// <exception cref="FileLoadException">If the file could not be loaded.</exception>
    /// <exception cref="BadImageFormatException">If the file is not a valid assembly, or the loaded
    /// assembly is compiled with a higher version of .NET.</exception>
    internal static IEnumerable<CompressionFormat> LoadCompressionPlugin(string file, bool printFailures = false)
    {
        if (file == null)
            throw new FileNotFoundException("A null-path cannot be loaded.");
        List<CompressionFormat> newFormats = new List<CompressionFormat>();

        string fullPath = Path.GetFullPath(file);

        Assembly dll = Assembly.LoadFile(fullPath);
        foreach (Type dllType in dll.GetTypes())
        {
            // only concrete subclasses of CompressionFormat qualify as plugins
            if (dllType.IsSubclassOf(typeof(CompressionFormat))
                && !dllType.IsAbstract)
            {
                try
                {
                    newFormats.Add(Activator.CreateInstance(dllType) as CompressionFormat);
                }
                catch (MissingMethodException)
                {
                    // Activator.CreateInstance requires a parameterless constructor
                    if (printFailures)
                        Console.WriteLine(dllType + " is a compression format, but does not have a parameterless constructor. Format cannot be loaded from " + fullPath + ".");
                }
            }
        }

        return newFormats;
    }

    /// <summary>
    /// Loads all compression formats found in the given folder.
    /// </summary>
    /// <param name="folder">The folder to load plugins from.</param>
    /// <returns>A list with an instance of all compression formats found in the given folder.</returns>
    internal static IEnumerable<CompressionFormat> LoadCompressionPlugins(string folder)
    {
        List<CompressionFormat> formats = new List<CompressionFormat>();

        foreach (string file in Directory.GetFiles(folder))
        {
            try
            {
                formats.AddRange(LoadCompressionPlugin(file, false));
            }
            // deliberately best-effort: files that are not valid plugin assemblies
            // (or cannot be loaded) are simply skipped.
            catch (Exception) { }
        }

        return formats;
    }
    #endregion

    /// <summary>
    /// Gets the full path to the parent directory of the given path.
    /// </summary>
    /// <param name="path">The path to get the parent directory path of.</param>
    /// <returns>The full path to the parent directory of the given path.</returns>
    public static string GetParent(string path)
    {
        return Directory.GetParent(path).FullName;
    }
}
+}
diff --git a/CSharp/DSDecmp/Utils/LZUtil.cs b/CSharp/DSDecmp/Utils/LZUtil.cs
new file mode 100644
index 0000000..0a0d93e
--- /dev/null
+++ b/CSharp/DSDecmp/Utils/LZUtil.cs
@@ -0,0 +1,62 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace DSDecmp
+{
/// <summary>
/// Utility class for compression using LZ-like compression schemes.
/// </summary>
public static class LZUtil
{
    /// <summary>
    /// Determine the maximum size of a LZ-compressed block starting at newPtr, using the already
    /// compressed data starting at oldPtr. Takes O(inLength * oldLength) = O(n^2) time.
    /// </summary>
    /// <param name="newPtr">The start of the data that needs to be compressed.</param>
    /// <param name="newLength">The number of bytes that still need to be compressed
    /// (or: the maximum number of bytes that _may_ be compressed into one block).</param>
    /// <param name="oldPtr">The start of the raw file.</param>
    /// <param name="oldLength">The number of bytes already compressed.</param>
    /// <param name="disp">The offset of the start of the longest block to refer to.</param>
    /// <param name="minDisp">The minimum allowed value for 'disp'.</param>
    /// <returns>The length of the longest sequence of bytes that can be copied from the already decompressed data.</returns>
    public static unsafe int GetOccurrenceLength(byte* newPtr, int newLength, byte* oldPtr, int oldLength, out int disp, int minDisp = 1)
    {
        disp = 0;
        if (newLength == 0)
            return 0;

        int bestLength = 0;
        // Try candidate start positions from the oldest byte forward, mirroring the
        // original implementation's behaviour (scan direction does not affect the
        // compression ratio). Note that disp == minDisp itself is never examined
        // (the loop bound is exclusive); this matches the original code.
        for (int start = 0; start < oldLength - minDisp; start++)
        {
            byte* candidate = oldPtr + start;

            // A match may run past the end of the old data into the data currently
            // being compressed, so always compare up to newLength bytes.
            int matchLength = 0;
            while (matchLength < newLength && candidate[matchLength] == newPtr[matchLength])
                matchLength++;

            if (matchLength > bestLength)
            {
                bestLength = matchLength;
                disp = oldLength - start;
                // a full-length match cannot be beaten; stop searching.
                if (bestLength == newLength)
                    break;
            }
        }
        return bestLength;
    }
}
+}
diff --git a/CSharp/DSDecmp/Utils/SimpleReversePrioQueue.cs b/CSharp/DSDecmp/Utils/SimpleReversePrioQueue.cs
new file mode 100644
index 0000000..f2eff33
--- /dev/null
+++ b/CSharp/DSDecmp/Utils/SimpleReversePrioQueue.cs
@@ -0,0 +1,95 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace DSDecmp
+{
/// <summary>
/// Very simplistic implementation of a priority queue that returns items with lowest
/// priority first. This is not the most efficient implementation, but required the least
/// work while using the classes from the .NET collections, and without requiring importing
/// another dll or several more class files in order to make it work.
/// </summary>
/// <typeparam name="TPrio">The type of the priority values.</typeparam>
/// <typeparam name="TValue">The type of item to put into the queue.</typeparam>
public class SimpleReversedPrioQueue<TPrio, TValue>
{
    // Buckets of values, keyed (and iterated in ascending order) by priority.
    // Each bucket is a FIFO list of the values enqueued with that priority.
    private SortedDictionary<TPrio, LinkedList<TValue>> items;
    private int itemCount;

    /// <summary>
    /// Gets the number of items in this queue.
    /// </summary>
    public int Count { get { return this.itemCount; } }

    /// <summary>
    /// Creates a new, empty reverse priority queue.
    /// </summary>
    public SimpleReversedPrioQueue()
    {
        this.items = new SortedDictionary<TPrio, LinkedList<TValue>>();
        this.itemCount = 0;
    }

    /// <summary>
    /// Enqueues the given value, using the given priority.
    /// </summary>
    /// <param name="priority">The priority of the value.</param>
    /// <param name="value">The value to enqueue.</param>
    public void Enqueue(TPrio priority, TValue value)
    {
        if (!this.items.ContainsKey(priority))
            this.items.Add(priority, new LinkedList<TValue>());
        this.items[priority].AddLast(value);
        this.itemCount++;
    }

    /// <summary>
    /// Gets the current value with the lowest priority from this queue, without dequeueing the value.
    /// </summary>
    /// <param name="priority">The priority of the returned value.</param>
    /// <returns>The current value with the lowest priority.</returns>
    /// <exception cref="IndexOutOfRangeException">If there are no items left in this queue.</exception>
    public TValue Peek(out TPrio priority)
    {
        if (this.itemCount == 0)
            throw new IndexOutOfRangeException();
        // the SortedDictionary iterates its keys in ascending order, so the first
        // entry holds the lowest priority.
        foreach (KeyValuePair<TPrio, LinkedList<TValue>> kvp in this.items)
        {
            priority = kvp.Key;
            return kvp.Value.First.Value;
        }
        throw new IndexOutOfRangeException();
    }

    /// <summary>
    /// Dequeues the current value at the head of this reverse priority queue.
    /// </summary>
    /// <param name="priority">The priority of the dequeued value.</param>
    /// <returns>The dequeued value, that used to be at the head of this queue.</returns>
    /// <exception cref="IndexOutOfRangeException">If this queue does not contain any items.</exception>
    public TValue Dequeue(out TPrio priority)
    {
        if (this.itemCount == 0)
            throw new IndexOutOfRangeException();
        LinkedList<TValue> lowestLL = null;
        priority = default(TPrio);
        foreach (KeyValuePair<TPrio, LinkedList<TValue>> kvp in this.items)
        {
            lowestLL = kvp.Value;
            priority = kvp.Key;
            break;
        }

        TValue returnValue = lowestLL.First.Value;
        lowestLL.RemoveFirst();
        // remove unused linked lists. priorities will only grow.
        if (lowestLL.Count == 0)
        {
            this.items.Remove(priority);
        }
        this.itemCount--;
        return returnValue;
    }
}
+}
diff --git a/CSharp/GoldenSunDD/GoldenSunDD.cs b/CSharp/GoldenSunDD/GoldenSunDD.cs
new file mode 100644
index 0000000..98fbc8d
--- /dev/null
+++ b/CSharp/GoldenSunDD/GoldenSunDD.cs
@@ -0,0 +1,251 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+using DSDecmp;
+
+namespace GameFormats
+{
/// <summary>
/// Decompressor for the 'LZ-0x40' variant of the LZ-0x11 scheme found in
/// Golden Sun: Dark Dawn. Compression is not supported.
/// </summary>
public class GoldenSunDD : CompressionFormat
{
    /// <summary>Short human-readable name of this format.</summary>
    public override string ShortFormatString
    {
        get { return "GSDD"; }
    }

    /// <summary>One-line description of this format.</summary>
    public override string Description
    {
        get { return "A variant of the LZ-0x11 scheme found in Golden Sun: Dark Dawn."; }
    }

    /// <summary>Command-line flag for this format.</summary>
    public override string CompressionFlag
    {
        get { return "gsdd"; }
    }

    /// <summary>Compression is not implemented for this format.</summary>
    public override bool SupportsCompression
    {
        get { return false; }
    }

    /// <summary>
    /// Determines whether this format can decompress the given stream, by attempting
    /// a full decompression into a temporary buffer. The stream position is restored
    /// before returning.
    /// </summary>
    public override bool Supports(System.IO.Stream stream, long inLength)
    {
        long streamStart = stream.Position;
        try
        {
            // because of the specific format, and especially since it overlaps with
            // the LZH8 header format, we'll need to try and decompress the file in
            // order to check if it is supported.
            try
            {
                using (MemoryStream tempStream = new MemoryStream())
                    this.Decompress(stream, inLength, tempStream);
                return true;
            }
            catch (TooMuchInputException)
            {
                // too much input is still OK.
                return true;
            }
            catch
            {
                // anything else is not OK.
                return false;
            }
        }
        finally
        {
            stream.Position = streamStart;
        }
    }

    #region Decompression method
    /// <summary>
    /// Decompresses the given stream, writing the decompressed data to the given output stream.
    /// </summary>
    /// <param name="instream">The stream with the compressed input.</param>
    /// <param name="inLength">The number of bytes of input available.</param>
    /// <param name="outstream">The stream to write decompressed output to.</param>
    /// <returns>The size of the decompressed data.</returns>
    public override long Decompress(System.IO.Stream instream, long inLength, System.IO.Stream outstream)
    {
        #region format specification
        // no NDSTEK-like specification for this one; I seem to not be able to get those right.
        /*
         * byte tag; // 0x40
         * byte[3] decompressedSize;
         * the rest is the data;
         *
         * for each chunk:
         * - first byte determines which blocks are compressed
         *     multiply by -1 to get the proper flags (1->compressed, 0->raw)
         * - then come 8 blocks:
         * - a non-compressed block is simply one single byte
         * - a compressed block can have 3 sizes:
         * - A0 CD EF
         *     -> Length = EF + 0x10, Disp = CDA
         * - A1 CD EF GH
         *     -> Length = GHEF + 0x110, Disp = CDA
         * - AB CD (B > 1)
         *     -> Length = B, Disp = CDA
         * Copy bytes from Dest-Disp to Dest (similar to the NDSTEK specs)
         */
        #endregion

        long readBytes = 0;

        byte type = (byte)instream.ReadByte();
        if (type != 0x40)
            throw new InvalidDataException("The provided stream is not a valid 'LZ-0x40' "
                + "compressed stream (invalid type 0x" + type.ToString("X") + ")");
        byte[] sizeBytes = new byte[3];
        instream.Read(sizeBytes, 0, 3);
        int decompressedSize = IOUtils.ToNDSu24(sizeBytes, 0);
        readBytes += 4;
        if (decompressedSize == 0)
        {
            // a stored 24-bit size of 0 means the real 32-bit size follows.
            sizeBytes = new byte[4];
            instream.Read(sizeBytes, 0, 4);
            decompressedSize = IOUtils.ToNDSs32(sizeBytes, 0);
            readBytes += 4;
        }

        // the maximum 'DISP' is 0xFFF.
        int bufferLength = 0x1000;
        byte[] buffer = new byte[bufferLength];
        int bufferOffset = 0;

        int currentOutSize = 0;
        int currentBlock = 0;
        // the expanded flags byte for the current group of 8 blocks;
        // null until the first flags byte has been read.
        bool[] expandedFlags = null;
        while (currentOutSize < decompressedSize)
        {
            // (throws when requested new flags byte is not available)
            #region Update the mask. If all flag bits have been read, get a new set.
            // FIX: this used to test 'currentBlock == 8', which dereferenced the
            // still-null 'expandedFlags' on the very first iteration and indexed
            // element 8 of the 8-element array afterwards. Read a flags byte up
            // front, and again after every 8th block.
            if (expandedFlags == null || currentBlock == 7)
            {
                if (readBytes >= inLength)
                    throw new NotEnoughDataException(currentOutSize, decompressedSize);
                int flags = instream.ReadByte(); readBytes++;
                if (flags < 0)
                    throw new StreamTooShortException();

                // determine which blocks are compressed
                int b = 0;
                expandedFlags = new bool[8];
                // flags = -flags
                while (flags > 0)
                {
                    bool bit = (flags & 0x80) > 0;
                    flags = (flags & 0x7F) << 1;
                    expandedFlags[b++] = (flags == 0) || !bit;
                }

                currentBlock = 0;
            }
            else
            {
                currentBlock++;
            }
            #endregion

            // bit = 1 <=> compressed.
            if (expandedFlags[currentBlock])
            {
                // (throws when < 2, 3 or 4 bytes are available)
                #region Get length and displacement('disp') values from next 2, 3 or 4 bytes

                // there are < 2 bytes available when the end is at most 1 byte away
                if (readBytes + 1 >= inLength)
                {
                    // make sure the stream is at the end
                    if (readBytes < inLength)
                    {
                        instream.ReadByte(); readBytes++;
                    }
                    throw new NotEnoughDataException(currentOutSize, decompressedSize);
                }
                int byte1 = instream.ReadByte(); readBytes++;
                int byte2 = instream.ReadByte(); readBytes++;
                if (byte2 < 0)
                    throw new StreamTooShortException();

                int disp, length;
                disp = (byte1 >> 4) + (byte2 << 4);
                if (disp > currentOutSize)
                    throw new InvalidDataException("Cannot go back more than already written. "
                        + "DISP = 0x" + disp.ToString("X") + ", #written bytes = 0x" + currentOutSize.ToString("X")
                        + " at 0x" + (instream.Position - 2).ToString("X"));

                switch (byte1 & 0x0F)
                {
                    case 0:
                        {
                            if (readBytes >= inLength)
                                throw new NotEnoughDataException(currentOutSize, decompressedSize);
                            int byte3 = instream.ReadByte(); readBytes++;
                            if (byte3 < 0)
                                throw new StreamTooShortException();
                            length = byte3 + 0x10;
                            break;
                        }
                    case 1:
                        {
                            // NOTE(review): the format comment above says Length = GHEF + 0x110,
                            // which would be (byte4 << 8) + byte3; the code combines the bytes
                            // the other way around. Confirm against known-good files before changing.
                            if (readBytes + 1 >= inLength)
                                throw new NotEnoughDataException(currentOutSize, decompressedSize);
                            int byte3 = instream.ReadByte(); readBytes++;
                            int byte4 = instream.ReadByte(); readBytes++;
                            if (byte4 < 0)
                                throw new StreamTooShortException();
                            length = ((byte3 << 8) + byte4) + 0x110;
                            break;
                        }
                    default:
                        {
                            length = byte1 & 0x0F;
                            break;
                        }
                }
                #endregion

                // copy 'length' bytes from 'disp' back in the ring buffer to the output.
                int bufIdx = bufferOffset + bufferLength - disp;
                for (int i = 0; i < length; i++)
                {
                    byte next = buffer[bufIdx % bufferLength];
                    bufIdx++;
                    outstream.WriteByte(next);
                    buffer[bufferOffset] = next;
                    bufferOffset = (bufferOffset + 1) % bufferLength;
                }
                currentOutSize += length;
            }
            else
            {
                // raw block: copy one byte to the output (and into the ring buffer).
                if (readBytes >= inLength)
                    throw new NotEnoughDataException(currentOutSize, decompressedSize);
                int next = instream.ReadByte(); readBytes++;
                if (next < 0)
                    throw new StreamTooShortException();

                currentOutSize++;
                outstream.WriteByte((byte)next);
                buffer[bufferOffset] = (byte)next;
                bufferOffset = (bufferOffset + 1) % bufferLength;
            }
            outstream.Flush();
        }

        if (readBytes < inLength)
        {
            // the input may be 4-byte aligned.
            if ((readBytes ^ (readBytes & 3)) + 4 < inLength)
                throw new TooMuchInputException(readBytes, inLength);
        }

        return decompressedSize;
    }
    #endregion

    /// <summary>
    /// Compression is not supported for this format.
    /// </summary>
    /// <exception cref="NotImplementedException">Always.</exception>
    public override int Compress(System.IO.Stream instream, long inLength, System.IO.Stream outstream)
    {
        throw new NotImplementedException();
    }
}
+}
diff --git a/CSharp/GoldenSunDD/GoldenSunDD.csproj b/CSharp/GoldenSunDD/GoldenSunDD.csproj
new file mode 100644
index 0000000..f6ddbf8
--- /dev/null
+++ b/CSharp/GoldenSunDD/GoldenSunDD.csproj
@@ -0,0 +1,57 @@
+
+
+
+ Debug
+ AnyCPU
+ 8.0.30703
+ 2.0
+ {8CE72663-0036-4A94-BD70-99AFE7CEEC0C}
+ Library
+ Properties
+ GoldenSunDD
+ GoldenSunDD
+ v2.0
+ 512
+
+
+
+ true
+ full
+ false
+ ..\PluginDistro\Debug\
+ DEBUG;TRACE
+ prompt
+ 4
+
+
+ pdbonly
+ true
+ ..\PluginDistro\
+ TRACE
+ prompt
+ 4
+
+
+
+
+
+
+
+
+
+
+
+
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}
+ DSDecmp
+
+
+
+
+
\ No newline at end of file
diff --git a/CSharp/GoldenSunDD/Properties/AssemblyInfo.cs b/CSharp/GoldenSunDD/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..a968ab0
--- /dev/null
+++ b/CSharp/GoldenSunDD/Properties/AssemblyInfo.cs
@@ -0,0 +1,36 @@
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+// General Information about an assembly is controlled through the following
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle("GoldenSunDD")]
+[assembly: AssemblyDescription("")]
+[assembly: AssemblyConfiguration("")]
+[assembly: AssemblyCompany("")]
+[assembly: AssemblyProduct("GoldenSunDD")]
+[assembly: AssemblyCopyright("Copyright © 2011")]
+[assembly: AssemblyTrademark("")]
+[assembly: AssemblyCulture("")]
+
+// Setting ComVisible to false makes the types in this assembly not visible
+// to COM components. If you need to access a type in this assembly from
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible(false)]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid("15c63bf0-dd3c-4ef2-b3c4-6c6278475ec9")]
+
+// Version information for an assembly consists of the following four values:
+//
+// Major Version
+// Minor Version
+// Build Number
+// Revision
+//
+// You can specify all the values or you can default the Build and Revision Numbers
+// by using the '*' as shown below:
+// [assembly: AssemblyVersion("1.0.*")]
+[assembly: AssemblyVersion("1.0.0.0")]
+[assembly: AssemblyFileVersion("1.0.0.0")]
diff --git a/CSharp/LuminousArc/LuminousArc.cs b/CSharp/LuminousArc/LuminousArc.cs
new file mode 100644
index 0000000..a3bf4b7
--- /dev/null
+++ b/CSharp/LuminousArc/LuminousArc.cs
@@ -0,0 +1,499 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.IO;
+using DSDecmp;
+
+namespace GameFormats
+{
/// <summary>
/// Compressor/decompressor for the LZE format found in Luminous Arc games.
/// Format specification by Roger Pepitone; http://pastebin.com/qNgSB2f9
/// </summary>
public class LuminousArc : CompressionFormat
{
    /// <summary>Short human-readable name of this format.</summary>
    public override string ShortFormatString
    {
        get { return "LZE/Le"; }
    }

    /// <summary>One-line description of this format.</summary>
    public override string Description
    {
        get { return "A variant of an LZ/RLE scheme found in Luminous Arc games."; }
    }

    /// <summary>Command-line flag used to select this format for compression.</summary>
    public override string CompressionFlag
    {
        get { return "Le"; }
    }

    /// <summary>This format supports compression as well as decompression.</summary>
    public override bool SupportsCompression
    {
        get { return true; }
    }

    // backing field for LookAhead; the plain (non-optimal) algorithm is the default.
    private static bool lookAhead = false;
    /// <summary>
    /// Gets or sets if, when compressing using this format, the optimal compression scheme should be used.
    /// </summary>
    public static bool LookAhead { get { return lookAhead; } set { lookAhead = value; } }

    /*
     * An LZE / Le file consists of the following:
     - A six byte header
     - A series of blocks

     The header consists of:
     - 2 bytes: 0x4c, 0x65 ("Le" in ASCII)
     - 4 bytes: the size of the uncompressed data in little-endian format

     Each block consists of:
     - 1 byte: the types for the following mini-records
               (2 bits per type, stored with the first type at the least
               significant bit)
     - 4 mini-records

     Each mini-record consists of:
     - If its type is 0:
       -- 2 bytes BYTE1 BYTE2: Write (3 + (BYTE2 >> 4)) bytes from
          back (5 + (BYTE1 | ((BYTE2 & 0xf) << 8))) to output
     - If its type is 1:
       -- 1 byte BYTE1: Write (2 + (BYTE >> 2)) bytes from
          back (1 + (BYTE & 3)) to output
     - If its type is 2:
       -- 1 byte: (copied to output stream)
     - If its type is 3:
       -- 3 bytes: (copied to output stream)

     The last block may go over the end; if so, ignore any excess data.
     */

    #region Method: Supports(Stream, inLength)
    /// <summary>
    /// Determines if this format may potentially be used to decompress the given stream.
    /// Does not guarantee success when returning true, but does guarantee failure when returning false.
    /// The stream position is restored before returning.
    /// </summary>
    public override bool Supports(System.IO.Stream stream, long inLength)
    {
        long streamStart = stream.Position;
        try
        {
            if (inLength <= 6) // min 6 byte header
                return false;

            byte[] header = new byte[2];
            stream.Read(header, 0, 2);
            if (header[0] != 'L' || header[1] != 'e')
                return false;

            byte[] outLength = new byte[4];
            stream.Read(outLength, 0, 4);
            // NOTE(review): inLength > 6 is guaranteed at this point, so a stored
            // decompressed size of 0 always yields false here.
            if (IOUtils.ToNDSu32(outLength, 0) == 0)
                return inLength == 6;

            // as long as the magic is OK, anything else is fine for now. (for this superficial check)
            return true;
        }
        finally
        {
            stream.Position = streamStart;
        }
    }
    #endregion

    #region Method: Decompress(instream, inLength, outstream)
    /// <summary>
    /// Decompresses the given stream using the LZE/Le compression format.
    /// </summary>
    /// <param name="instream">The stream with the compressed input.</param>
    /// <param name="inLength">The number of bytes of input available.</param>
    /// <param name="outstream">The stream to write decompressed output to.</param>
    /// <returns>The size of the decompressed data.</returns>
    public override long Decompress(System.IO.Stream instream, long inLength, System.IO.Stream outstream)
    {
        long readBytes = 0;

        byte[] magic = new byte[2];
        instream.Read(magic, 0, 2);
        if (magic[0] != 'L' || magic[1] != 'e')
            throw new InvalidDataException("The provided stream is not a valid LZE (Le) "
                + "compressed stream (invalid magic '" + (char)magic[0] + (char)magic[1] + "')");
        byte[] sizeBytes = new byte[4];
        instream.Read(sizeBytes, 0, 4);
        uint decompressedSize = IOUtils.ToNDSu32(sizeBytes, 0);
        // NOTE(review): only the 4 size bytes are counted here; the 2 magic bytes are
        // not included in readBytes, so the comparisons against inLength below run
        // with an offset of 2 — confirm against upstream before changing.
        readBytes += 4;

        // the maximum 'DISP-5' is 0xFFF.
        int bufferLength = 0xFFF + 5;
        byte[] buffer = new byte[bufferLength];
        int bufferOffset = 0;


        int currentOutSize = 0;
        int flags = 0, mask = 3;
        while (currentOutSize < decompressedSize)
        {
            // (throws when requested new flags byte is not available)
            #region Update the mask. If all flag bits have been read, get a new set.
            // the current mask is the mask used in the previous run. So if it masks the
            // last flag bit, get a new flags byte. The four 2-bit types are consumed
            // from the least significant bits upward by shifting 'flags' right.
            if (mask == 3)
            {
                if (readBytes >= inLength)
                    throw new NotEnoughDataException(currentOutSize, decompressedSize);
                flags = instream.ReadByte(); readBytes++;
                if (flags < 0)
                    throw new StreamTooShortException();
                mask = 0xC0;
            }
            else
            {
                mask >>= 2;
                flags >>= 2;
            }
            #endregion

            switch (flags & 0x3)
            {
                case 0:
                    #region 0 -> LZ10-like format
                    {
                        #region Get length and displacement('disp') values from next 2 bytes
                        // there are < 2 bytes available when the end is at most 1 byte away
                        if (readBytes + 1 >= inLength)
                        {
                            // make sure the stream is at the end
                            if (readBytes < inLength)
                            {
                                instream.ReadByte(); readBytes++;
                            }
                            throw new NotEnoughDataException(currentOutSize, decompressedSize);
                        }
                        int byte1 = instream.ReadByte(); readBytes++;
                        int byte2 = instream.ReadByte(); readBytes++;
                        if (byte2 < 0)
                            throw new StreamTooShortException();

                        // the number of bytes to copy
                        int length = byte2 >> 4;
                        length += 3;

                        // from where the bytes should be copied (relatively)
                        int disp = ((byte2 & 0x0F) << 8) | byte1;
                        disp += 5;

                        if (disp > currentOutSize)
                            throw new InvalidDataException("Cannot go back more than already written. "
                                + "DISP = 0x" + disp.ToString("X") + ", #written bytes = 0x" + currentOutSize.ToString("X")
                                + " at 0x" + (instream.Position - 2).ToString("X"));
                        #endregion

                        // copy 'length' bytes from 'disp' back in the ring buffer to the output.
                        int bufIdx = bufferOffset + bufferLength - disp;
                        for (int i = 0; i < length; i++)
                        {
                            byte next = buffer[bufIdx % bufferLength];
                            bufIdx++;
                            outstream.WriteByte(next);
                            buffer[bufferOffset] = next;
                            bufferOffset = (bufferOffset + 1) % bufferLength;
                        }
                        currentOutSize += length;

                        break;
                    }
                    #endregion
                case 1:
                    #region 1 -> compact LZ10/RLE-like format
                    {
                        #region Get length and displacement('disp') values from next byte
                        if (readBytes >= inLength)
                        {
                            throw new NotEnoughDataException(currentOutSize, decompressedSize);
                        }
                        int b = instream.ReadByte(); readBytes++;
                        if (b < 0)
                            throw new StreamTooShortException();

                        // the number of bytes to copy
                        int length = b >> 2;
                        length += 2;

                        // from where the bytes should be copied (relatively)
                        int disp = (b & 0x03);
                        disp += 1;

                        if (disp > currentOutSize)
                            throw new InvalidDataException("Cannot go back more than already written. "
                                + "DISP = 0x" + disp.ToString("X") + ", #written bytes = 0x" + currentOutSize.ToString("X")
                                + " at 0x" + (instream.Position - 1).ToString("X"));
                        #endregion

                        // with disp < length this repeats recently-written bytes (RLE-like).
                        int bufIdx = bufferOffset + bufferLength - disp;
                        for (int i = 0; i < length; i++)
                        {
                            byte next = buffer[bufIdx % bufferLength];
                            bufIdx++;
                            outstream.WriteByte(next);
                            buffer[bufferOffset] = next;
                            bufferOffset = (bufferOffset + 1) % bufferLength;
                        }
                        currentOutSize += length;
                        break;
                    }
                    #endregion
                case 2:
                    #region 2 -> copy 1 byte
                    {
                        if (readBytes >= inLength)
                            throw new NotEnoughDataException(currentOutSize, decompressedSize);
                        int next = instream.ReadByte(); readBytes++;
                        if (next < 0)
                            throw new StreamTooShortException();

                        currentOutSize++;
                        outstream.WriteByte((byte)next);
                        buffer[bufferOffset] = (byte)next;
                        bufferOffset = (bufferOffset + 1) % bufferLength;
                        break;
                    }
                    #endregion
                case 3:
                    #region 3 -> copy 3 bytes
                    {
                        for (int i = 0; i < 3; i++)
                        {
                            if (readBytes >= inLength)
                                throw new NotEnoughDataException(currentOutSize, decompressedSize);
                            int next = instream.ReadByte(); readBytes++;
                            if (next < 0)
                                throw new StreamTooShortException();

                            currentOutSize++;
                            outstream.WriteByte((byte)next);
                            buffer[bufferOffset] = (byte)next;
                            bufferOffset = (bufferOffset + 1) % bufferLength;
                        }
                        break;
                    }
                    #endregion
                default:
                    // unreachable: (flags & 0x3) is always 0..3.
                    throw new Exception("BUG: Mask is not 2 bits long!");
            }

            outstream.Flush();
        }

        if (readBytes < inLength)
        {
            // the input may be 4-byte aligned.
            if ((readBytes ^ (readBytes & 3)) + 4 < inLength)
                throw new TooMuchInputException(readBytes, inLength);
            // (this happens rather often for Le files?)
        }

        return decompressedSize;
    }
    #endregion

    /// <summary>
    /// Checks if the given arguments have the '-opt' option, which makes this format
    /// compress using (near-)optimal compression instead of the original compression algorithm.
    /// </summary>
    /// <param name="args">The command-line arguments to inspect.</param>
    /// <returns>The number of arguments consumed (1 when '-opt' is present, otherwise 0).</returns>
    public override int ParseCompressionOptions(string[] args)
    {
        LookAhead = false;
        if (args.Length > 0)
            if (args[0] == "-opt")
            {
                LookAhead = true;
                return 1;
            }
        return 0;
    }

    /// <summary>
    /// Compresses the given input into the LZE/Le format, using the original (greedy)
    /// algorithm, or the look-ahead variant when <see cref="LookAhead"/> is set.
    /// </summary>
    /// <param name="instream">The stream with the raw input.</param>
    /// <param name="inLength">The number of input bytes.</param>
    /// <param name="outstream">The stream to write compressed output to.</param>
    /// <returns>The total number of bytes written to the output.</returns>
    public unsafe override int Compress(System.IO.Stream instream, long inLength, System.IO.Stream outstream)
    {
        // block type 0: stores at most 3+0xF = 0x12 = 18 bytes (in 2 bytes)
        // block type 1: stores at most 2+0x3F = 0x41 = 65 bytes (in 1 byte)
        // block type 2: 1 raw byte
        // block type 3: 3 raw bytes

        if (LookAhead)
            return CompressWithLA(instream, inLength, outstream);


        // save the input data in an array to prevent having to go back and forth in a file
        byte[] indata = new byte[inLength];
        int numReadBytes = instream.Read(indata, 0, (int)inLength);
        if (numReadBytes != inLength)
            throw new StreamTooShortException();

        // write the compression head first
        outstream.WriteByte((byte)'L');
        outstream.WriteByte((byte)'e');
        outstream.WriteByte((byte)(inLength & 0xFF));
        outstream.WriteByte((byte)((inLength >> 8) & 0xFF));
        outstream.WriteByte((byte)((inLength >> 16) & 0xFF));
        outstream.WriteByte((byte)((inLength >> 24) & 0xFF));

        int compressedLength = 6;

        fixed (byte* instart = &indata[0])
        {
            // we do need to buffer the output, as the first byte indicates which blocks are compressed.
            // this version does not use a look-ahead, so we do not need to buffer more than 4 blocks at a time.
            // (a block is at most 3 bytes long)
            byte[] outbuffer = new byte[4 * 3 + 1];
            outbuffer[0] = 0;
            int bufferlength = 1, bufferedBlocks = 0;
            int readBytes = 0;

            // a raw byte that could not (yet) be emitted; flushed later as part of a
            // type-2 or type-3 block. -1 means no byte is cached.
            int cacheByte = -1;

            while (readBytes < inLength)
            {
                #region If 4 blocks are bufferd, write them and reset the buffer
                // we can only buffer 4 blocks at a time.
                if (bufferedBlocks == 4)
                {
                    outstream.Write(outbuffer, 0, bufferlength);
                    compressedLength += bufferlength;
                    // reset the buffer
                    outbuffer[0] = 0;
                    bufferlength = 1;
                    bufferedBlocks = 0;
                }
                #endregion

                // type 0: 3 <= len <= 18; 5 <= disp <= 0x1004
                // type 1: 2 <= len <= 65; 1 <= disp <= 4
                // type 2: 1 raw byte
                // type 3: 3 raw bytes

                // check if we can compress it using type 1 first (only 1 byte-long block)
                int disp;
                int oldLength = Math.Min(readBytes, 0x1004);
                int length = LZUtil.GetOccurrenceLength(instart + readBytes, (int)Math.Min(inLength - readBytes, 65),
                    instart + readBytes - oldLength, oldLength, out disp, 1);
                if (disp >= 1 && ((disp <= 4 && length >= 2) || (disp >= 5 && length >= 3)))
                {
                    if (cacheByte >= 0)
                    {
                        // write a single raw byte block (the previous byte could not be the start of any compressed block)
                        outbuffer[bufferlength++] = (byte)(cacheByte & 0xFF);
                        outbuffer[0] |= (byte)(2 << (bufferedBlocks * 2));
                        cacheByte = -1;
                        bufferedBlocks++;
                        // the block set may be full; just retry this iteration.
                        continue;
                    }
                    if (disp >= 5)
                    {
                        #region compress using type 0

                        // type 0: store len/disp in 2 bytes:
                        // AB CD, with len = C + 3, disp = DAB + 5
                        // (type 0 needs no flag bits: its 2-bit type code is 00)

                        // make sure we do not try to compress more than fits into the block
                        length = Math.Min(length, 0xF + 3);

                        readBytes += length;

                        outbuffer[bufferlength++] = (byte)((disp - 5) & 0xFF);
                        outbuffer[bufferlength] = (byte)(((disp - 5) >> 8) & 0xF);
                        outbuffer[bufferlength++] |= (byte)(((length - 3) & 0xF) << 4);

                        #endregion
                    }
                    else // 1 <= disp <= 4
                    {
                        #region compress using type 1

                        // type 1: store len/disp in 1 byte:
                        // ABCDEFGH, with len = ABCDEF + 2, disp = GH + 1

                        readBytes += length;

                        outbuffer[bufferlength] = (byte)(((length - 2) << 2) & 0xFC);
                        outbuffer[bufferlength] |= (byte)((disp - 1) & 0x3);
                        bufferlength++;

                        outbuffer[0] |= (byte)(1 << (bufferedBlocks * 2));

                        #endregion
                    }
                }
                else
                {
                    if (cacheByte < 0)
                    {
                        // first fail? remember byte, try to compress starting at next byte
                        cacheByte = *(instart + (readBytes++));
                        continue;
                    }
                    else
                    {
                        // 2 consecutive fails -> store 3 raw bytes (type 3) if possible.
                        if (inLength - readBytes >= 2)
                        {
                            outbuffer[bufferlength++] = (byte)(cacheByte & 0xFF);
                            outbuffer[bufferlength++] = *(instart + (readBytes++));
                            outbuffer[bufferlength++] = *(instart + (readBytes++));
                            outbuffer[0] |= (byte)(3 << (bufferedBlocks * 2));
                            cacheByte = -1;
                        }
                        else
                        {
                            // there are only two bytes remaining (incl. the cached byte)
                            // so write the cached byte first as single raw byte.
                            // keep the next/last byte as new cache, since the block buffer may be full.
                            outbuffer[bufferlength++] = (byte)(cacheByte & 0xFF);
                            outbuffer[0] |= (byte)(2 << (bufferedBlocks * 2));
                            cacheByte = *(instart + (readBytes++));
                        }
                    }
                }

                bufferedBlocks++;
            }

            // there may be one cache-byte left.
            if (cacheByte >= 0)
            {
                // if the current set of blocks is full, empty it first
                if (bufferedBlocks == 4)
                {
                    #region empty block buffer

                    outstream.Write(outbuffer, 0, bufferlength);
                    compressedLength += bufferlength;
                    // reset the buffer
                    outbuffer[0] = 0;
                    bufferlength = 1;
                    bufferedBlocks = 0;

                    #endregion
                }

                outbuffer[bufferlength++] = (byte)(cacheByte & 0xFF);
                cacheByte = -1;
                outbuffer[0] |= (byte)(2 << (bufferedBlocks * 2));
                bufferedBlocks++;
            }

            // copy any remaining blocks to the output
            if (bufferedBlocks > 0)
            {
                outstream.Write(outbuffer, 0, bufferlength);
                compressedLength += bufferlength;
            }
        }

        return compressedLength;
    }

    /// <summary>
    /// Look-ahead ('-opt') variant of the compression algorithm; not yet implemented.
    /// </summary>
    /// <exception cref="NotImplementedException">Always.</exception>
    private unsafe int CompressWithLA(Stream instream, long inLength, Stream outstream)
    {
        throw new NotImplementedException();
    }
}
+}
diff --git a/CSharp/LuminousArc/LuminousArc.csproj b/CSharp/LuminousArc/LuminousArc.csproj
new file mode 100644
index 0000000..2e433b0
--- /dev/null
+++ b/CSharp/LuminousArc/LuminousArc.csproj
@@ -0,0 +1,59 @@
+
+
+
+ Debug
+ AnyCPU
+ 8.0.30703
+ 2.0
+ {4BD8DF5C-E971-45D1-B170-340D22DDB351}
+ Library
+ Properties
+ LuminousArc
+ LuminousArc
+ v2.0
+ 512
+
+
+
+ true
+ full
+ false
+ ..\PluginDistro\Debug\
+ DEBUG;TRACE
+ prompt
+ 4
+ true
+
+
+ pdbonly
+ true
+ ..\PluginDistro\
+ TRACE
+ prompt
+ 4
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}
+ DSDecmp
+
+
+
+
+
\ No newline at end of file
diff --git a/CSharp/LuminousArc/Properties/AssemblyInfo.cs b/CSharp/LuminousArc/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..59e9e8f
--- /dev/null
+++ b/CSharp/LuminousArc/Properties/AssemblyInfo.cs
@@ -0,0 +1,36 @@
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+// General Information about an assembly is controlled through the following
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle("LuminousArc")]
+[assembly: AssemblyDescription("")]
+[assembly: AssemblyConfiguration("")]
+[assembly: AssemblyCompany("")]
+[assembly: AssemblyProduct("LuminousArc")]
+[assembly: AssemblyCopyright("Copyright © 2011")]
+[assembly: AssemblyTrademark("")]
+[assembly: AssemblyCulture("")]
+
+// Setting ComVisible to false makes the types in this assembly not visible
+// to COM components. If you need to access a type in this assembly from
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible(false)]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid("d74a7ef8-8f65-4ed0-ae05-a4a97130f047")]
+
+// Version information for an assembly consists of the following four values:
+//
+// Major Version
+// Minor Version
+// Build Number
+// Revision
+//
+// You can specify all the values or you can default the Build and Revision Numbers
+// by using the '*' as shown below:
+// [assembly: AssemblyVersion("1.0.*")]
+[assembly: AssemblyVersion("1.0.0.0")]
+[assembly: AssemblyFileVersion("1.0.0.0")]
diff --git a/CSharp/PluginDistro/DSDecmp.xml b/CSharp/PluginDistro/DSDecmp.xml
new file mode 100644
index 0000000..e1f120f
--- /dev/null
+++ b/CSharp/PluginDistro/DSDecmp.xml
@@ -0,0 +1,1140 @@
+
+
+
+ DSDecmp
+
+
+
+
+ An exception indication that the input has more data than required in order
+ to decompress it. This may indicate that more sub-files are present in the file.
+
+
+
+
+ Creates a new exception indicating that the input has more data than necessary for
+ decompressing the stream. It may indicate that other data is present after the compressed
+ stream.
+
+ The number of bytes read by the decompressor.
+ The indicated length of the input stream.
+
+
+
+ Gets the number of bytes read by the decompressor to decompress the stream.
+
+
+
+
+ An exception indicating that the file cannot be compressed, because the decompressed size
+ cannot be represented in the current compression format.
+
+
+
+
+ Creates a new exception that indicates that the input is too big to be compressed.
+
+
+
+
+ Class for I/O-related utility methods.
+
+
+
+
+ Returns a 4-byte unsigned integer as used on the NDS converted from four bytes
+ at a specified position in a byte array.
+
+ The source of the data.
+ The location of the data in the source.
+ The indicated 4 bytes converted to uint
+
+
+
+ Returns a 4-byte signed integer as used on the NDS converted from four bytes
+ at a specified position in a byte array.
+
+ The source of the data.
+ The location of the data in the source.
+ The indicated 4 bytes converted to int
+
+
+
+ Converts a u32 value into a sequence of bytes that would make ToNDSu32 return
+ the given input value.
+
+
+
+
+ Returns a 3-byte integer as used in the built-in compression
+ formats in the DS, converted from three bytes at a specified position in a byte array.
+
+ The source of the data.
+ The location of the data in the source.
+ The indicated 3 bytes converted to an integer.
+
+
+
+ (Attempts to) load compression formats from the given file.
+
+ The dll file to load.
+ If formats without an empty constructor should get a print.
+ A list with an instance of all compression formats found in the given dll file.
+ If the given file does not exist.
+ If the file could not be loaded.
+ If the file is not a valid assembly, or the loaded
+ assembly is compiled with a higher version of .NET.
+
+
+
+ Loads all compression formats found in the given folder.
+
+ The folder to load plugins from.
+ A list with an instance of all compression formats found in the given folder.
+
+
+
+ Gets the full path to the parent directory of the given path.
+
+ The path to get the parent directory path of.
+ The full path to the parent directory of the given path.
+
+
+
+ Base class for all compression formats.
+
+
+
+
+ Checks if the decompressor for this format supports the given file. Assumes the
+ file exists. Returns false when it is certain that the given file is not supported.
+ False positives may occur, as this method should not do any decompression, and
+ may mis-interpret a similar file format as compressed.
+
+ The name of the file to check.
+ False if the file can certainly not be decompressed using this decompressor.
+ True if the file may potentially be decompressed using this decompressor.
+
+
+
+ Checks if the decompressor for this format supports the data from the given stream.
+ Returns false when it is certain that the given data is not supported.
+ False positives may occur, as this method should not do any decompression, and may
+ mis-interpret a similar data format as compressed.
+
+ The stream that may or may not contain compressed data. The
+ position of this stream may change during this call, but will be returned to its
+ original position when the method returns.
+ The length of the input stream.
+ False if the data can certainly not be decompressed using this decompressor.
+ True if the data may potentially be decompressed using this decompressor.
+
+
+
+ Decompresses the given file, writing the decompressed data to the given output file.
+ The output file will be overwritten if it already exists.
+ Assumes Supports(infile)
returns true
.
+
+ The file to decompress.
+ The target location of the decompressed file.
+
+
+
+ Decompresses the given stream, writing the decompressed data to the given output stream.
+ Assumes Supports(instream)
returns true
.
+ After this call, the input stream will be positioned at the end of the compressed stream,
+ or at the initial position + inLength
, whichever comes first.
+
+ The stream to decompress. At the end of this method, the position
+ of this stream is directly after the compressed data.
+ The length of the input data. Not necessarily all of the
+ input data may be read (if there is padding, for example), however never more than
+ this number of bytes is read from the input stream.
+ The stream to write the decompressed data to.
+ The length of the output data.
+ When the given length of the input data
+ is not enough to properly decompress the input.
+
+
+
+ Compresses the given input file, and writes the compressed data to the given
+ output file.
+
+ The file to compress.
+ The file to write the compressed data to.
+ The size of the compressed file. If -1, the file could not be compressed.
+
+
+
+ Compresses the next inLength
bytes from the input stream,
+ and writes the compressed data to the given output stream.
+
+ The stream to read plaintext data from.
+ The length of the plaintext data.
+ The stream to write the compressed data to.
+ The size of the compressed stream. If -1, the file could not be compressed.
+
+
+
+ Parses any input specific for this format. Does nothing by default.
+
+ Any arguments that may be used by the format.
+ The number of input arguments consumed by this format.
+
+
+
+ Gets a short string identifying this compression format.
+
+
+
+
+ Gets a short description of this compression format (used in the program usage).
+
+
+
+
+ Gets if this format supports compressing a file.
+
+
+
+
+ Gets if this format supports decompressing a file.
+
+
+
+
+ Gets the value that must be given on the command line in order to compress using this format.
+
+
+
+
+ Utility class for compression using LZ-like compression schemes.
+
+
+
+
+ Determine the maximum size of a LZ-compressed block starting at newPtr, using the already compressed data
+ starting at oldPtr. Takes O(inLength * oldLength) = O(n^2) time.
+
+ The start of the data that needs to be compressed.
+ The number of bytes that still need to be compressed.
+ (or: the maximum number of bytes that _may_ be compressed into one block)
+ The start of the raw file.
+ The number of bytes already compressed.
+ The offset of the start of the longest block to refer to.
+ The minimum allowed value for 'disp'.
+ The length of the longest sequence of bytes that can be copied from the already decompressed data.
+
+
+
+ Base class for Nitro-based decompressors. Uses the 1-byte magic and 3-byte decompression
+ size format.
+
+
+
+
+ If true, Nitro Decompressors will not decompress files that have a decompressed
+ size (plaintext size) larger than MaxPlaintextSize.
+
+
+
+
+ The maximum allowed size of the decompressed file (plaintext size) allowed for Nitro
+ Decompressors. Only used when SkipLargePlaintexts = true.
+
+
+
+
+ The first byte of every file compressed with the format for this particular
+ Nitro Decompressor instance.
+
+
+
+
+ Creates a new instance of the Nitro Compression Format base class.
+
+ The expected first byte of the file for this format.
+
+
+
+ Checks if the first four (or eight) bytes match the format used in nitro compression formats.
+
+
+
+
+ A format that is composed of multiple formats.
+ When compressing, the input is compressed using the best contained format.
+ When decompressing, all contained formats will try to decompress the file, until one succeeds.
+
+
+
+
+ The actual list of formats this format is composed of.
+
+
+
+
+ Creates a new composite format based on the given sequence of formats.
+
+
+
+
+ Creates a new composite format based on the given formats.
+
+
+
+
+ Checks if any of the contained formats supports the given input.
+
+
+
+
+ Attempts to decompress the given input by letting all contained formats
+ try to decompress the input.
+
+
+
+
+ Compresses the given input using the contained format that yields the best results in terms of
+ size reduction.
+
+
+
+
+ Handles the compression options for each of the contained compression formats.
+
+
+
+
+ Gets the ShortFormatString of the last CompressionFormat that was used to compress input.
+
+
+
+
+ An exception thrown by the compression or decompression function, indicating that the
+ given input length was too large for the given input stream.
+
+
+
+
+ Creates a new exception that indicates that the stream was shorter than the given input length.
+
+
+
+
+ Compressor and decompressor for the LZ-0x11 format used in many of the games for the
+ newer Nintendo consoles and handhelds.
+
+
+
+
+ Creates a new instance of the LZ-11 compression format.
+
+
+
+
+ Checks if the given arguments have the '-opt' option, which makes this format
+ compress using (near-)optimal compression instead of the original compression algorithm.
+
+
+
+
+ Decompresses the input using the LZ-11 compression scheme.
+
+
+
+
+ Compresses the input using the 'original', unoptimized compression algorithm.
+ This algorithm should yield files that are the same as those found in the games.
+ (delegates to the optimized method if LookAhead is set)
+
+
+
+
+ Variation of the original compression method, making use of Dynamic Programming to 'look ahead'
+ and determine the optimal 'length' values for the compressed blocks. Is not 100% optimal,
+ as the flag-bytes are not taken into account.
+
+
+
+
+ Gets the optimal compression lengths for each start of a compressed block using Dynamic Programming.
+ This takes O(n^2) time, although in practice it will often be O(n^3) since one of the constants is 0x10110
+ (the maximum length of a compressed block)
+
+ The data to compress.
+ The length of the data to compress.
+ The optimal 'length' of the compressed blocks. For each byte in the input data,
+ this value is the optimal 'length' value. If it is 1, the block should not be compressed.
+ The 'disp' values of the compressed blocks. May be 0, in which case the
+ corresponding length will never be anything other than 1.
+
+
+
+ Gets a short string identifying this compression format.
+
+
+
+
+ Gets a short description of this compression format (used in the program usage).
+
+
+
+
+ Gets the value that must be given on the command line in order to compress using this format.
+
+
+
+
+ Gets if this format supports compressing a file.
+
+
+
+
+ Sets the flag that determines if 'look-ahead'/DP should be used when compressing
+ with the LZ-11 format. The default is false, which is what is used in the original
+ implementation.
+
+
+
+
+ Compressor and decompressor for the LZ-0x10 format used in many of the games for the
+ newer Nintendo consoles and handhelds.
+
+
+
+
+ Creates a new instance of the LZ-10 compression format.
+
+
+
+
+ Checks if the given arguments have the '-opt' option, which makes this format
+ compress using (near-)optimal compression instead of the original compression algorithm.
+
+
+
+
+ Decompress a stream that is compressed in the LZ-10 format.
+
+ The compressed stream.
+ The length of the input stream.
+ The output stream, where the decompressed data is written to.
+
+
+
+ Compresses the input using the 'original', unoptimized compression algorithm.
+ This algorithm should yield files that are the same as those found in the games.
+ (delegates to the optimized method if LookAhead is set)
+
+
+
+
+ Variation of the original compression method, making use of Dynamic Programming to 'look ahead'
+ and determine the optimal 'length' values for the compressed blocks. Is not 100% optimal,
+ as the flag-bytes are not taken into account.
+
+
+
+
+ Gets the optimal compression lengths for each start of a compressed block using Dynamic Programming.
+ This takes O(n^2) time.
+
+ The data to compress.
+ The length of the data to compress.
+ The optimal 'length' of the compressed blocks. For each byte in the input data,
+ this value is the optimal 'length' value. If it is 1, the block should not be compressed.
+ The 'disp' values of the compressed blocks. May be 0, in which case the
+ corresponding length will never be anything other than 1.
+
+
+
+ Gets a short string identifying this compression format.
+
+
+
+
+ Gets a short description of this compression format (used in the program usage).
+
+
+
+
+ Gets the value that must be given on the command line in order to compress using this format.
+
+
+
+
+ Gets if this format supports compressing a file.
+
+
+
+
+ Sets the flag that determines if 'look-ahead'/DP should be used when compressing
+ with the LZ-10 format. The default is false, which is what is used in the original
+ implementation.
+
+
+
+
+ Compressor and decompressor for the RLE format used in several of the games for the
+ newer Nintendo consoles and handhelds.
+
+
+
+
+ Creates a new instance of the RLE compression format.
+
+
+
+
+ Decompresses the input using the RLE compression scheme.
+
+
+
+
+ Compresses the input using the RLE compression scheme.
+
+
+
+
+ Gets a short string identifying this compression format.
+
+
+
+
+ Gets a short description of this compression format (used in the program usage).
+
+
+
+
+ Gets the value that must be given on the command line in order to compress using this format.
+
+
+
+
+ Gets if this format supports compressing a file.
+
+
+
+
+ The LZ-Overlay compression format. Compresses part of the file from end to start.
+ Is used for the 'overlay' files in NDS games, as well as arm9.bin.
+ Note that the last 12 bytes should not be included in the 'inLength' argument when
+ decompressing arm9.bin. This is done automatically if a file is given instead of a stream.
+
+
+
+
+ Checks if the given arguments have the '-opt' option, which makes this format
+ compress using (near-)optimal compression instead of the original compression algorithm.
+
+
+
+
+ Checks if this format supports decompressing the given file.
+
+
+
+
+ Checks if this format supports decompressing the given input.
+
+
+
+
+ Decompresses the given input file to the given output file using the LZ-Overlay compression format.
+
+
+
+
+ Decompresses the given input using the LZ-Overlay compression scheme.
+
+
+
+
+ Compresses the input using the LZ-Overlay compression scheme.
+
+
+
+
+ Compresses the given input stream with the LZ-Ovl compression, but compresses _forward_
+ instead of backwards.
+
+ The input stream to compress.
+ The length of the input stream.
+ The stream to write to.
+
+
+
+ Variation of the original compression method, making use of Dynamic Programming to 'look ahead'
+ and determine the optimal 'length' values for the compressed blocks. Is not 100% optimal,
+ as the flag-bytes are not taken into account.
+
+
+
+
+ Gets the optimal compression lengths for each start of a compressed block using Dynamic Programming.
+ This takes O(n^2) time.
+
+ The data to compress.
+ The length of the data to compress.
+ The optimal 'length' of the compressed blocks. For each byte in the input data,
+ this value is the optimal 'length' value. If it is 1, the block should not be compressed.
+ The 'disp' values of the compressed blocks. May be less than 3, in which case the
+ corresponding length will never be anything other than 1.
+
+
+
+ Gets the 'optimal' length of the compressed part of the file.
+ Or rather: the length in such a way that compressing any more will not
+ result in a shorter file.
+
+ The lengths of the compressed blocks, as gotten from GetOptimalCompressionLengths.
+ The 'optimal' length of the compressed part of the file.
+
+
+
+ Gets a short string identifying this compression format.
+
+
+
+
+ Gets a short description of this compression format (used in the program usage).
+
+
+
+
+ Gets the value that must be given on the command line in order to compress using this format.
+
+
+
+
+ Gets if this format supports compressing a file.
+
+
+
+
+ Sets the flag that determines if 'look-ahead'/DP should be used when compressing
+ with the LZ-Ovl format. The default is false, which is what is used in the original
+ implementation.
+
+
+
+
+ (Attempts to) Compress the given input to the given output, using the given format.
+
+ The I/O arguments from the program input.
+ The desired format to compress with.
+
+
+
+ Parses the IO arguments of the input.
+
+ The arguments to parse.
+ If the arguments are used for compression. If not, decompression is assumed. (used for default output folder name)
+ The files to handle as input.
+ The directory to save the handled files in. If this is null,
+ the files should be overwritten. If this does not exist, it is the output file
+ (the input may only contain one file if that is the case).
+ If files that cannot be handled (properly) should be copied to the output directory.
+ True iff parsing of the arguments succeeded.
+
+
+
+ Guess the extension of a file by looking at the given magic bytes of a file.
+ If they are alphanumeric (without accents), they could indicate the type of file.
+ If no sensible extension could be found from the magic bytes, the given default extension is returned.
+
+
+
+
+ Copies the source file to the destination path.
+
+
+
+
+ Gets the compression format corresponding to the given format string.
+
+
+
+
+ Gets a sequence over all compression formats currently supported; both built-in and plugin-based.
+
+
+
+
+ Gets a sequence over all built-in compression formats.
+
+ If the built-in composite formats should also be part of the sequence.
+
+
+
+ Gets a sequence over all formats that can be used from plugins.
+
+
+
+
+ Gets the first item from the given sequence, or the default value of the type in the sequence
+ if it is empty.
+
+
+
+
+ An exception that is thrown by the decompression functions when there
+ is not enough data available in order to properly decompress the input.
+
+
+
+
+ Creates a new NotEnoughDataException.
+
+ The actual number of written bytes.
+ The desired number of written bytes.
+
+
+
+ Gets the actual number of written bytes.
+
+
+
+
+ Gets the number of bytes that was supposed to be written.
+
+
+
+
+ Very simplistic implementation of a priority queue that returns items with lowest priority first.
+ This is not the most efficient implementation, but required the least work while using the classes
+ from the .NET collections, and without requiring importing another dll or several more class files
+ in order to make it work.
+
+ The type of the priority values.
+ The type of item to put into the queue.
+
+
+
+ Creates a new, empty reverse priority queue.
+
+
+
+
+ Enqueues the given value, using the given priority.
+
+ The priority of the value.
+ The value to enqueue.
+
+
+
+ Gets the current value with the lowest priority from this queue, without dequeueing the value.
+
+ The priority of the returned value.
+ The current value with the lowest priority.
+ If there are no items left in this queue.
+
+
+
+ Dequeues the current value at the head of this reverse priority queue.
+
+ The priority of the dequeued value.
+ The dequeued value, that used to be at the head of this queue.
+ If this queue does not contain any items.
+
+
+
+ Gets the number of items in this queue.
+
+
+
+
+ A composite format with all formats supported natively by the GBA.
+
+
+
+
+ Creates a new instance of the format composed of all native GBA compression formats.
+
+
+
+
+ Gets a short string identifying this compression format.
+
+
+
+
+ Gets a short description of this compression format (used in the program usage).
+
+
+
+
+ Gets if this format supports compressing a file.
+
+
+
+
+ Gets the value that must be given on the command line in order to compress using this format.
+
+
+
+
+ A composite format with all formats supported natively by the NDS (but not LZ-Overlay)
+
+
+
+
+ Creates a new instance of the format composed of all native NDS compression formats.
+
+
+
+
+ Gets a short string identifying this compression format.
+
+
+
+
+ Gets a short description of this compression format (used in the program usage).
+
+
+
+
+ Gets if this format supports compressing a file.
+
+
+
+
+ Gets the value that must be given on the command line in order to compress using this format.
+
+
+
+
+ Compressor and decompressor for the Huffman format used in many of the games for the
+ newer Nintendo consoles and handhelds.
+
+
+
+
+ Creates a new generic instance of the Huffman compression format.
+
+ The block size used.
+
+
+
+ Decompresses the given stream, writing the decompressed data to the given output stream.
+ Assumes Supports(instream)
returns true
.
+ After this call, the input stream will be positioned at the end of the compressed stream,
+ or at the initial position + inLength
, whichever comes first.
+
+ The stream to decompress. At the end of this method, the position
+ of this stream is directly after the compressed data.
+ The length of the input data. Not necessarily all of the
+ input data may be read (if there is padding, for example), however never more than
+ this number of bytes is read from the input stream.
+ The stream to write the decompressed data to.
+ The length of the output data.
+ When the given length of the input data
+ is not enough to properly decompress the input.
+
+
+
+ Gets the tree node with the lowest priority (frequency) from the leaf and node queues.
+ If the priority is the same for both head items in the queues, the node from the leaf queue is picked.
+
+
+
+
+ Sets the block size used when using the Huffman format to compress.
+
+
+
+
+ Gets if this format supports compression. Always returns true.
+
+
+
+
+ The possible data sizes used in Huffman compression formats on the GBA/NDS.
+
+
+
+
+ Each data block is four bits long.
+
+
+
+
+ Each data block is eight bits long.
+
+
+
+
+ A single node in a Huffman tree.
+
+
+
+
+ The data contained in this node. May not mean anything when isData == false
+
+
+
+
+ A flag indicating if this node has been filled.
+
+
+
+
+ A flag indicating if this node contains data. If not, this is not a leaf node.
+
+
+
+
+ The child of this node at side 0
+
+
+
+
+ The child of this node at side 1
+
+
+
+
+ The index of this node in the array for building the proper ordering.
+ If -1, this node has not yet been placed in the array.
+
+
+
+
+ Manually creates a new node for a huffman tree.
+
+ The data for this node.
+ If this node represents data.
+ The child of this node on the 0 side.
+ The child of this node on the 1 side.
+
+
+
+ Creates a new node in the Huffman tree.
+
+ The stream to read from. It is assumed that there is (at least)
+ one more byte available to read.
+ If this node is a data-node.
+ The offset of this node in the source data, relative to the start
+ of the compressed file.
+ The indicated end of the huffman tree. If the stream is past
+ this position, the tree is invalid.
+
+
+
+ Generates and returns a string-representation of the huffman tree starting at this node.
+
+
+
+
+ The data contained in this node. May not mean anything when isData == false
.
+ Throws a NullReferenceException when this node has not been defined (ie: reference was outside the
+ bounds of the tree definition)
+
+
+
+
+ Returns true if this node represents data.
+
+
+
+
+ The child of this node at side 0
+
+
+
+
+ The child of this node at side 1
+
+
+
+
+ The parent node of this node.
+
+
+
+
+ Determines if this is the Child0 of the parent node. Assumes there is a parent.
+
+
+
+
+ Determines if this is the Child1 of the parent node. Assumes there is a parent.
+
+
+
+
+ Get or set the depth of this node. Will not be set automatically, but
+ will be set recursively (the depth of all child nodes will be updated when this is set).
+
+
+
+
+ Calculates the size of the sub-tree with this node as root.
+
+
+
+
+ The Huffman compression scheme using 4-bit data blocks.
+
+
+
+
+ Creates a new instance of the 4-bit Huffman compression format.
+
+
+
+
+ Applies Huffman compression with a datablock size of 4 bits.
+
+ The stream to compress.
+ The length of the input stream.
+ The stream to write the decompressed data to.
+ The size of the decompressed data.
+
+
+
+ Gets a short string identifying this compression format.
+
+
+
+
+ Gets a short description of this compression format.
+
+
+
+
+ Gets the value that must be given on the command line in order to compress using this format.
+
+
+
+
+ The Huffman compression scheme using 8-bit data blocks.
+
+
+
+
+ Creates a new instance of the 8-bit Huffman compression format.
+
+
+
+
+ Applies Huffman compression with a datablock size of 8 bits.
+
+ The stream to compress.
+ The length of the input stream.
+ The stream to write the decompressed data to.
+ The size of the decompressed data.
+
+
+
+ Inserts the given node into the given array, in such a location that
+ the offset to both of its children is at most the given maximum, and as large as possible.
+ In order to do this, the contents of the array may be shifted to the right.
+
+ The node to insert.
+ The array to insert the node in.
+ The maximum offset between parent and children.
+
+
+
+ Shifts the node at the given index one to the right.
+ If the distance between parent and child becomes too large due to this shift, the parent is shifted as well.
+
+ The array to shift the node in.
+ The index of the node to shift.
+ The maximum distance between parent and children.
+
+
+
+ Gets a short string identifying this compression format.
+
+
+
+
+ Gets a short description of this compression format.
+
+
+
+
+ Gets the value that must be given on the command line in order to compress using this format.
+
+
+
+
+ Composite compression format representing both Huffman compression schemes.
+
+
+
+
+ Creates a new instance of the general Huffman compression format.
+
+
+
+
+ Gets a short string identifying this compression format.
+
+
+
+
+ Gets a short description of this compression format.
+
+
+
+
+ Gets if this format supports compression. Always returns true.
+
+
+
+
+ Gets the value that must be given on the command line in order to compress using this format.
+
+
+
+
+ 'Compression' format without any compression whatsoever.
+ Compression using this format will only prepend 0x00 plus the original file size to the file.
+
+
+
+
+ Creates a new instance of the NULL-compression format.
+
+
+
+
+ Checks if the given stream is (or could be) 'compressed' using the NULL compression format.
+
+
+
+
+ 'Decompresses' the given input stream using the NULL format.
+
+
+
+
+ 'Compresses' the given input stream using the NULL format.
+
+
+
+
+ Gets a short string identifying this compression format.
+
+
+
+
+ Gets a short description of this compression format (used in the program usage).
+
+
+
+
+ Gets if this format supports compressing a file.
+
+
+
+
+ Gets the value that must be given on the command line in order to compress using this format.
+
+
+
+
diff --git a/CSharp/PluginDistro/GoldenSunDD.dll b/CSharp/PluginDistro/GoldenSunDD.dll
new file mode 100644
index 0000000..0b07164
Binary files /dev/null and b/CSharp/PluginDistro/GoldenSunDD.dll differ
diff --git a/CSharp/PluginDistro/LuminousArc.dll b/CSharp/PluginDistro/LuminousArc.dll
new file mode 100644
index 0000000..8b97086
Binary files /dev/null and b/CSharp/PluginDistro/LuminousArc.dll differ
diff --git a/CSharp/Tester/Program.cs b/CSharp/Tester/Program.cs
new file mode 100644
index 0000000..9a4bf99
--- /dev/null
+++ b/CSharp/Tester/Program.cs
@@ -0,0 +1,27 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using GameFormats;
+
+namespace Tester
+{
+ class Program
+ {
+ static void Main(string[] args)
+ {
+ Console.WriteLine("Start");
+
+ //new LuminousArc().Compress("D:/tile molester/DSLazy/NDS_UNPACK_LUMARC/test/atcbg_dec.imb", "D:/tile molester/DSLazy/NDS_UNPACK_LUMARC/test/atcbg_dec_cmp.imb");
+ //new LuminousArc().Decompress("D:/tile molester/DSLazy/NDS_UNPACK_LUMARC/test/atcbg_dec_cmp.imb", "D:/tile molester/DSLazy/NDS_UNPACK_LUMARC/test/atcbg_dec_cmp_dec.imb");
+
+ new LuminousArc().Compress("D:/tile molester/DSLazy/NDS_UNPACK_LUMARC/test/o_lmoji00_dec.bin", "D:/tile molester/DSLazy/NDS_UNPACK_LUMARC/test/o_lmoji00_dec_cmp.bin");
+ Console.WriteLine();
+ new LuminousArc().Decompress("D:/tile molester/DSLazy/NDS_UNPACK_LUMARC/test/o_lmoji00_dec_cmp.bin", "D:/tile molester/DSLazy/NDS_UNPACK_LUMARC/test/o_lmoji00_dec_cmp_dec.bin");
+
+
+ Console.WriteLine("Success?");
+ Console.ReadLine();
+ }
+ }
+}
diff --git a/CSharp/Tester/Properties/AssemblyInfo.cs b/CSharp/Tester/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..1fb8ee3
--- /dev/null
+++ b/CSharp/Tester/Properties/AssemblyInfo.cs
@@ -0,0 +1,36 @@
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+// General Information about an assembly is controlled through the following
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle("Tester")]
+[assembly: AssemblyDescription("")]
+[assembly: AssemblyConfiguration("")]
+[assembly: AssemblyCompany("")]
+[assembly: AssemblyProduct("Tester")]
+[assembly: AssemblyCopyright("Copyright © 2011")]
+[assembly: AssemblyTrademark("")]
+[assembly: AssemblyCulture("")]
+
+// Setting ComVisible to false makes the types in this assembly not visible
+// to COM components. If you need to access a type in this assembly from
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible(false)]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid("abe40de2-686c-4757-83ff-375f54a21490")]
+
+// Version information for an assembly consists of the following four values:
+//
+// Major Version
+// Minor Version
+// Build Number
+// Revision
+//
+// You can specify all the values or you can default the Build and Revision Numbers
+// by using the '*' as shown below:
+// [assembly: AssemblyVersion("1.0.*")]
+[assembly: AssemblyVersion("1.0.0.0")]
+[assembly: AssemblyFileVersion("1.0.0.0")]
diff --git a/CSharp/Tester/Tester.csproj b/CSharp/Tester/Tester.csproj
new file mode 100644
index 0000000..2bfeca5
--- /dev/null
+++ b/CSharp/Tester/Tester.csproj
@@ -0,0 +1,71 @@
+
+
+
+ Debug
+ x86
+ 8.0.30703
+ 2.0
+ {A4FABF4B-59F2-4D4B-9012-FF177980EAB7}
+ Exe
+ Properties
+ Tester
+ Tester
+ v4.0
+ Client
+ 512
+
+
+ x86
+ true
+ full
+ false
+ bin\Debug\
+ DEBUG;TRACE
+ prompt
+ 4
+
+
+ x86
+ pdbonly
+ true
+ bin\Release\
+ TRACE
+ prompt
+ 4
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}
+ DSDecmp
+
+
+ {8CE72663-0036-4A94-BD70-99AFE7CEEC0C}
+ GoldenSunDD
+
+
+ {4BD8DF5C-E971-45D1-B170-340D22DDB351}
+ LuminousArc
+
+
+
+
+
\ No newline at end of file
diff --git a/Fossil Fighters Tool.sln b/Fossil Fighters Tool.sln
new file mode 100644
index 0000000..70d4397
--- /dev/null
+++ b/Fossil Fighters Tool.sln
@@ -0,0 +1,22 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Fossil Fighters Tool", "Fossil Fighters Tool\Fossil Fighters Tool.csproj", "{BE7E123C-32B4-4D1D-9BCB-02C2A48D346B}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DSDecmp", "CSharp\DSDecmp\DSDecmp.csproj", "{E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Release|Any CPU = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {BE7E123C-32B4-4D1D-9BCB-02C2A48D346B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {BE7E123C-32B4-4D1D-9BCB-02C2A48D346B}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {BE7E123C-32B4-4D1D-9BCB-02C2A48D346B}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {BE7E123C-32B4-4D1D-9BCB-02C2A48D346B}.Release|Any CPU.Build.0 = Release|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {E6F419F9-D6B5-4BE7-99BB-97C48C927FF3}.Release|Any CPU.Build.0 = Release|Any CPU
+ EndGlobalSection
+EndGlobal
diff --git a/Fossil Fighters Tool/BitmapFileReader.cs b/Fossil Fighters Tool/BitmapFileReader.cs
new file mode 100644
index 0000000..e867846
--- /dev/null
+++ b/Fossil Fighters Tool/BitmapFileReader.cs
@@ -0,0 +1,80 @@
+using System.Text;
+
+namespace Fossil_Fighters_Tool;
+
+/// <summary>
+/// Reads a raw bitmap resource: one 32-bit header word followed by per-pixel palette indexes.
+/// The low nibble of the header selects a fixed sprite size; the high 16 bits select the color depth.
+/// </summary>
+public class BitmapFileReader : IDisposable
+{
+    /// <summary>Raw 32-bit header word (meaning only partially known).</summary>
+    public int Unknown1 { get; }
+
+    /// <summary>Bitmap width in pixels; 0 when the header's size nibble is unrecognized (callers fall back to 16).</summary>
+    public int Width { get; }
+
+    /// <summary>Bitmap height in pixels; 0 when the header's size nibble is unrecognized (callers fall back to 16).</summary>
+    public int Height { get; }
+
+    /// <summary>Color depth from the high 16 bits of the header: 0 = 16-color (two 4-bit indexes per byte), 1 = 256-color.</summary>
+    public int ColorType { get; }
+
+    /// <summary>Every byte following the header; each byte carries one or two palette indexes depending on <see cref="ColorType"/>.</summary>
+    public byte[] BitmapColorIndexes { get; }
+
+    private readonly Stream _stream;
+
+    /// <summary>Parses the full stream eagerly; takes ownership of <paramref name="stream"/> (disposed with this reader).</summary>
+    public BitmapFileReader(Stream stream)
+    {
+        _stream = stream;
+
+        using var binaryReader = new BinaryReader(stream, Encoding.ASCII, true);
+
+        Unknown1 = binaryReader.ReadInt32();
+
+        // The low nibble selects one of the known fixed sprite dimensions.
+        // Unlisted values leave Width/Height at 0; the caller compensates.
+        switch (Unknown1 & 0xF)
+        {
+            case 0x04: Width = 16; Height = 16; break;
+            case 0x08: Width = 32; Height = 32; break;
+            case 0x09: Width = 32; Height = 16; break;
+            case 0x0A: Width = 16; Height = 32; break;
+            case 0x0C: Width = 64; Height = 64; break;
+            case 0x0D: Width = 64; Height = 32; break;
+            case 0x0E: Width = 32; Height = 64; break;
+        }
+
+        ColorType = Unknown1 >> 16;
+
+        // Slurp everything after the header. NOTE: an empty payload makes the first
+        // ReadByte throw EndOfStreamException, matching the original do/while behavior.
+        var bitmapColorIndexes = new List<byte>();
+
+        do
+        {
+            bitmapColorIndexes.Add(binaryReader.ReadByte());
+        } while (_stream.Position < _stream.Length);
+
+        BitmapColorIndexes = bitmapColorIndexes.ToArray();
+    }
+
+    public void Dispose()
+    {
+        _stream.Dispose();
+        GC.SuppressFinalize(this);
+    }
+}
\ No newline at end of file
diff --git a/Fossil Fighters Tool/ColorPaletteFileReader.cs b/Fossil Fighters Tool/ColorPaletteFileReader.cs
new file mode 100644
index 0000000..d5cbfed
--- /dev/null
+++ b/Fossil Fighters Tool/ColorPaletteFileReader.cs
@@ -0,0 +1,41 @@
+using System.Text;
+using SixLabors.ImageSharp.PixelFormats;
+
+namespace Fossil_Fighters_Tool;
+
+/// <summary>
+/// Reads a color palette file: a 32-bit palette-type word followed by 16 or 256
+/// BGR555 color entries, decoded into <see cref="Rgba32"/> values.
+/// </summary>
+public class ColorPaletteFileReader : IDisposable
+{
+    /// <summary>Palette size as stored in the file's first 32-bit word.</summary>
+    public enum ColorPaletteType
+    {
+        Color16 = 0,
+        Color256 = 1
+    }
+
+    public ColorPaletteType ColorTableType { get; }
+
+    /// <summary>Decoded palette; entry 0 is fully transparent, all other entries opaque.</summary>
+    public Rgba32[] ColorTable { get; }
+
+    private readonly Stream _stream;
+
+    /// <summary>Parses the palette eagerly; takes ownership of <paramref name="stream"/> (disposed with this reader).</summary>
+    public ColorPaletteFileReader(Stream stream)
+    {
+        _stream = stream;
+
+        using var binaryReader = new BinaryReader(stream, Encoding.ASCII, true);
+
+        // Direct cast instead of round-tripping the Int32 through a string for Enum.Parse;
+        // both accept any numeric value, so behavior is unchanged.
+        ColorTableType = (ColorPaletteType) binaryReader.ReadInt32();
+        ColorTable = new Rgba32[ColorTableType == ColorPaletteType.Color16 ? 16 : 256];
+
+        for (var i = 0; i < ColorTable.Length; i++)
+        {
+            // Each entry is BGR555: 5 bits per channel, expanded to 8 bits via << 3
+            // (low 3 bits stay zero). Index 0 is treated as the transparent color.
+            var rawValue = binaryReader.ReadInt16();
+            ColorTable[i] = new Rgba32((byte) ((rawValue & 0x1F) << 3), (byte) (((rawValue >> 5) & 0x1F) << 3), (byte) (((rawValue >> 10) & 0x1F) << 3), (byte) (i == 0 ? 0 : 255));
+        }
+    }
+
+    public void Dispose()
+    {
+        _stream.Dispose();
+        GC.SuppressFinalize(this);
+    }
+}
\ No newline at end of file
diff --git a/Fossil Fighters Tool/Fossil Fighters Tool.csproj b/Fossil Fighters Tool/Fossil Fighters Tool.csproj
new file mode 100644
index 0000000..9830f6f
--- /dev/null
+++ b/Fossil Fighters Tool/Fossil Fighters Tool.csproj
@@ -0,0 +1,15 @@
+
+
+
+ Exe
+ net6.0
+ Fossil_Fighters_Tool
+ enable
+ enable
+
+
+
+
+
+
+
diff --git a/Fossil Fighters Tool/MarFileReader.cs b/Fossil Fighters Tool/MarFileReader.cs
new file mode 100644
index 0000000..e22b3d5
--- /dev/null
+++ b/Fossil Fighters Tool/MarFileReader.cs
@@ -0,0 +1,103 @@
+using System.Buffers;
+using System.Text;
+
+namespace Fossil_Fighters_Tool;
+
+/// <summary>
+/// Reads a MAR archive: a magic word, an entry count, then a pointer table of
+/// (offset, decompressed size) pairs locating packed MCM files inside the archive.
+/// </summary>
+public class MarFileReader : IDisposable
+{
+    /// <summary>One entry of the MAR pointer table.</summary>
+    public readonly struct MarFilePointer
+    {
+        /// <summary>Byte offset of the packed MCM data inside the MAR file.</summary>
+        public int MarDataOffset { get; init; }
+
+        /// <summary>Size of the MCM payload once decompressed.</summary>
+        public int McmDataSize { get; init; }
+    }
+
+    // ASCII "MAR\0" read as a little-endian Int32.
+    private const int MarFileHeaderId = 0x0052414D;
+
+    public MarFilePointer[] FilePointers { get; }
+
+    private readonly FileStream _stream;
+
+    /// <summary>Parses the header and pointer table; takes ownership of <paramref name="stream"/>.</summary>
+    /// <exception cref="InvalidDataException">The stream does not start with the MAR magic.</exception>
+    public MarFileReader(FileStream stream)
+    {
+        _stream = stream;
+
+        using var binaryReader = new BinaryReader(stream, Encoding.ASCII, true);
+
+        if (binaryReader.ReadInt32() != MarFileHeaderId)
+        {
+            throw new InvalidDataException("This is not a MAR file.");
+        }
+
+        var filePointerCount = binaryReader.ReadInt32();
+        FilePointers = new MarFilePointer[filePointerCount];
+
+        for (var i = 0; i < filePointerCount; i++)
+        {
+            FilePointers[i] = new MarFilePointer
+            {
+                MarDataOffset = binaryReader.ReadInt32(),
+                McmDataSize = binaryReader.ReadInt32()
+            };
+        }
+    }
+
+    /// <summary>Writes each contained MCM blob to <paramref name="outputDirectory"/> as "{index}.mcm".</summary>
+    public void ExtractTo(string outputDirectory)
+    {
+        // CreateDirectory is a no-op when the directory already exists.
+        Directory.CreateDirectory(outputDirectory);
+
+        var buffer = ArrayPool<byte>.Shared.Rent(4096);
+
+        try
+        {
+            for (var i = 0; i < FilePointers.Length; i++)
+            {
+                var filePointer = FilePointers[i];
+                _stream.Seek(filePointer.MarDataOffset, SeekOrigin.Begin);
+
+                using var fileStream = new FileStream(Path.Combine(outputDirectory, $"{i}.mcm"), FileMode.Create);
+
+                if (i + 1 < FilePointers.Length)
+                {
+                    // Interior entry: its length is the distance to the next entry's offset.
+                    // A while (not do/while) loop lets a zero-length entry produce an empty
+                    // file instead of throwing on a forced zero-byte Read.
+                    var fileLength = FilePointers[i + 1].MarDataOffset - filePointer.MarDataOffset;
+                    var written = 0;
+
+                    while (written < fileLength)
+                    {
+                        var readCount = _stream.Read(buffer, 0, Math.Min(buffer.Length, fileLength - written));
+                        if (readCount == 0) throw new EndOfStreamException();
+
+                        fileStream.Write(buffer, 0, readCount);
+                        written += readCount;
+                    }
+                }
+                else
+                {
+                    // Last entry runs to the end of the archive.
+                    int readCount;
+
+                    while ((readCount = _stream.Read(buffer, 0, buffer.Length)) > 0)
+                    {
+                        fileStream.Write(buffer, 0, readCount);
+                    }
+                }
+            }
+        }
+        finally
+        {
+            ArrayPool<byte>.Shared.Return(buffer);
+        }
+    }
+
+    public void Dispose()
+    {
+        _stream.Dispose();
+        GC.SuppressFinalize(this);
+    }
+}
\ No newline at end of file
diff --git a/Fossil Fighters Tool/McmFileReader.cs b/Fossil Fighters Tool/McmFileReader.cs
new file mode 100644
index 0000000..7885764
--- /dev/null
+++ b/Fossil Fighters Tool/McmFileReader.cs
@@ -0,0 +1,103 @@
+using System.Buffers;
+using System.Text;
+
+namespace Fossil_Fighters_Tool;
+
+/// <summary>
+/// Reads an MCM container: a magic word, total data size, a chunk-pointer table,
+/// and an end offset delimiting the final chunk.
+/// </summary>
+public class McmFileReader : IDisposable
+{
+    /// <summary>One entry of the MCM chunk-pointer table.</summary>
+    public readonly struct McmFilePointer
+    {
+        /// <summary>Byte offset of the chunk inside the MCM file.</summary>
+        public int DataOffset { get; init; }
+    }
+
+    // ASCII "MCM\0" read as a little-endian Int32.
+    private const int McmFileHeaderId = 0x004D434D;
+
+    public int DataTotalSize { get; }
+
+    public int Unknown2 { get; }
+
+    public int Unknown3 { get; }
+
+    public McmFilePointer[] FilePointers { get; }
+
+    /// <summary>Offset just past the last chunk; used as the end boundary when extracting it.</summary>
+    public int EndOffset { get; }
+
+    private readonly FileStream _stream;
+
+    /// <summary>Parses the header and chunk table; takes ownership of <paramref name="stream"/>.</summary>
+    /// <exception cref="InvalidDataException">The stream does not start with the MCM magic.</exception>
+    public McmFileReader(FileStream stream)
+    {
+        _stream = stream;
+
+        using var binaryReader = new BinaryReader(stream, Encoding.ASCII, true);
+
+        if (binaryReader.ReadInt32() != McmFileHeaderId)
+        {
+            throw new InvalidDataException("This is not a MCM file.");
+        }
+
+        DataTotalSize = binaryReader.ReadInt32();
+        Unknown2 = binaryReader.ReadInt32();
+
+        // Note the on-disk order: the pointer count precedes Unknown3,
+        // which precedes the pointer table itself.
+        var filePointerCount = binaryReader.ReadInt32();
+        FilePointers = new McmFilePointer[filePointerCount];
+
+        Unknown3 = binaryReader.ReadInt32();
+
+        for (var i = 0; i < filePointerCount; i++)
+        {
+            FilePointers[i] = new McmFilePointer
+            {
+                DataOffset = binaryReader.ReadInt32()
+            };
+        }
+
+        EndOffset = binaryReader.ReadInt32();
+    }
+
+    /// <summary>Writes each chunk to <paramref name="outputDirectory"/> as "{index}.bin".</summary>
+    public void ExtractTo(string outputDirectory)
+    {
+        // CreateDirectory is a no-op when the directory already exists.
+        Directory.CreateDirectory(outputDirectory);
+
+        var buffer = ArrayPool<byte>.Shared.Rent(4096);
+
+        try
+        {
+            for (var i = 0; i < FilePointers.Length; i++)
+            {
+                var startOffset = FilePointers[i].DataOffset;
+                // Each chunk ends where the next begins; the last ends at EndOffset.
+                var endOffset = i + 1 < FilePointers.Length ? FilePointers[i + 1].DataOffset : EndOffset;
+
+                _stream.Seek(startOffset, SeekOrigin.Begin);
+
+                using var fileStream = new FileStream(Path.Combine(outputDirectory, $"{i}.bin"), FileMode.Create);
+
+                var fileLength = endOffset - startOffset;
+                var written = 0;
+
+                // A while (not do/while) loop lets a zero-length chunk produce an empty
+                // file instead of throwing on a forced zero-byte Read.
+                while (written < fileLength)
+                {
+                    var readCount = _stream.Read(buffer, 0, Math.Min(buffer.Length, fileLength - written));
+                    if (readCount == 0) throw new EndOfStreamException();
+
+                    fileStream.Write(buffer, 0, readCount);
+                    written += readCount;
+                }
+            }
+        }
+        finally
+        {
+            ArrayPool<byte>.Shared.Return(buffer);
+        }
+    }
+
+    public void Dispose()
+    {
+        _stream.Dispose();
+        GC.SuppressFinalize(this);
+    }
+}
\ No newline at end of file
diff --git a/Fossil Fighters Tool/MmsFileReader.cs b/Fossil Fighters Tool/MmsFileReader.cs
new file mode 100644
index 0000000..2a00bc2
--- /dev/null
+++ b/Fossil Fighters Tool/MmsFileReader.cs
@@ -0,0 +1,140 @@
+using System.Text;
+
+namespace Fossil_Fighters_Tool;
+
+/// <summary>
+/// Reads an MMS sprite-set descriptor: a header of offsets pointing at three
+/// (index table, NUL-terminated file name) pairs for the animation, color-palette
+/// and bitmap resources referenced by this sprite.
+/// </summary>
+public class MmsFileReader : IDisposable
+{
+    // ASCII "MMS\0" read as a little-endian Int32.
+    private const int MmsFileHeaderId = 0x00534D4D;
+
+    public int Unknown1 { get; }
+
+    public int Unknown2 { get; }
+
+    public int Unknown3 { get; }
+
+    public int Unknown4 { get; }
+
+    public int EndHeaderOffset { get; }
+
+    public int Unknown6 { get; }
+
+    public int[] AnimationFileIndexes { get; }
+
+    public string AnimationFileName { get; }
+
+    public int Unknown9 { get; }
+
+    public int[] ColorPaletteFileIndexes { get; }
+
+    public string ColorPaletteFileName { get; }
+
+    public int Unknown12 { get; }
+
+    public int[] BitmapFileIndexes { get; }
+
+    public string BitmapFileName { get; }
+
+    private readonly FileStream _stream;
+
+    /// <summary>Parses the whole descriptor eagerly; takes ownership of <paramref name="stream"/>.</summary>
+    /// <exception cref="InvalidDataException">The stream does not start with the MMS magic.</exception>
+    public MmsFileReader(FileStream stream)
+    {
+        _stream = stream;
+
+        using var binaryReader = new BinaryReader(stream, Encoding.ASCII, true);
+
+        if (binaryReader.ReadInt32() != MmsFileHeaderId)
+        {
+            throw new InvalidDataException("This is not a MMS file.");
+        }
+
+        Unknown1 = binaryReader.ReadInt32();
+        Unknown2 = binaryReader.ReadInt32();
+        Unknown3 = binaryReader.ReadInt32();
+        Unknown4 = binaryReader.ReadInt32();
+        EndHeaderOffset = binaryReader.ReadInt32();
+        Unknown6 = binaryReader.ReadInt32();
+        var animationFileIndexOffset = binaryReader.ReadInt32();
+        var animationFileNameOffset = binaryReader.ReadInt32();
+        Unknown9 = binaryReader.ReadInt32();
+        var colorPaletteFileIndexOffset = binaryReader.ReadInt32();
+        var colorPaletteFileNameOffset = binaryReader.ReadInt32();
+        Unknown12 = binaryReader.ReadInt32();
+        var bitmapFileIndexOffset = binaryReader.ReadInt32();
+        var bitmapFileNameOffset = binaryReader.ReadInt32();
+
+        // Each index table runs from its own offset up to the offset of the
+        // file name that follows it; each file name is NUL-terminated ASCII.
+        AnimationFileIndexes = ReadIndexTable(binaryReader, animationFileIndexOffset, animationFileNameOffset);
+        AnimationFileName = ReadNullTerminatedString(binaryReader, animationFileNameOffset);
+
+        ColorPaletteFileIndexes = ReadIndexTable(binaryReader, colorPaletteFileIndexOffset, colorPaletteFileNameOffset);
+        ColorPaletteFileName = ReadNullTerminatedString(binaryReader, colorPaletteFileNameOffset);
+
+        BitmapFileIndexes = ReadIndexTable(binaryReader, bitmapFileIndexOffset, bitmapFileNameOffset);
+        BitmapFileName = ReadNullTerminatedString(binaryReader, bitmapFileNameOffset);
+    }
+
+    // Reads consecutive Int32 indexes starting at startOffset until the stream
+    // position reaches endOffset. Always reads at least one entry, matching the
+    // original do/while behavior.
+    private int[] ReadIndexTable(BinaryReader reader, int startOffset, int endOffset)
+    {
+        _stream.Seek(startOffset, SeekOrigin.Begin);
+
+        var indexes = new List<int>();
+
+        do
+        {
+            indexes.Add(reader.ReadInt32());
+        } while (_stream.Position < endOffset);
+
+        return indexes.ToArray();
+    }
+
+    // Reads an ASCII string at offset, terminated by a NUL character (not included).
+    private string ReadNullTerminatedString(BinaryReader reader, int offset)
+    {
+        _stream.Seek(offset, SeekOrigin.Begin);
+
+        var builder = new StringBuilder();
+        char currentChar;
+
+        while ((currentChar = reader.ReadChar()) != '\0')
+        {
+            builder.Append(currentChar);
+        }
+
+        return builder.ToString();
+    }
+
+    public void Dispose()
+    {
+        _stream.Dispose();
+        GC.SuppressFinalize(this);
+    }
+}
\ No newline at end of file
diff --git a/Fossil Fighters Tool/Program.cs b/Fossil Fighters Tool/Program.cs
new file mode 100644
index 0000000..56c0960
--- /dev/null
+++ b/Fossil Fighters Tool/Program.cs
@@ -0,0 +1,169 @@
+using Fossil_Fighters_Tool;
+using SixLabors.ImageSharp;
+using SixLabors.ImageSharp.PixelFormats;
+
+/// <summary>
+/// CLI entry point: given a MAR file or a directory tree of MAR files, unpacks the
+/// MAR → MCM → MMS chain and renders referenced bitmaps to PNG via their palettes.
+/// </summary>
+class Program
+{
+    public static void Main(string[] args)
+    {
+        if (args.Length < 1) return;
+
+        var inputFilePath = args[0];
+
+        if (Directory.Exists(inputFilePath))
+        {
+            foreach (var file in Directory.EnumerateFiles(inputFilePath, "*", SearchOption.AllDirectories))
+            {
+                try
+                {
+                    // Skip anything under a "bin" directory — that is our own extraction output.
+                    if (Array.Exists(Path.GetDirectoryName(file)!.Split(Path.DirectorySeparatorChar), s => s.Equals("bin"))) continue;
+                    Console.WriteLine($"Extracting: {file}");
+                    ExtractMarFile(file);
+                }
+                catch (Exception)
+                {
+                    // Best effort: not every file in the tree is a MAR archive.
+                }
+            }
+
+            return;
+        }
+
+        if (!File.Exists(inputFilePath))
+        {
+            Console.WriteLine("File does not exist.");
+            return;
+        }
+
+        try
+        {
+            ExtractMarFile(inputFilePath);
+        }
+        catch (Exception)
+        {
+            // Best effort: silently ignore a file that fails to parse.
+        }
+    }
+
+    // Unpacks one MAR archive into "<dir>/bin/<name>/", then each contained MCM,
+    // then attempts to interpret every extracted chunk as an MMS descriptor.
+    private static void ExtractMarFile(string inputFilePath)
+    {
+        using var marFileReader = new MarFileReader(new FileStream(inputFilePath, FileMode.Open, FileAccess.Read));
+
+        var directoryName = Path.GetDirectoryName(inputFilePath)!;
+        var fileName = Path.GetFileName(inputFilePath);
+        var marExtractedDirectory = Path.Combine(directoryName, "bin", fileName);
+
+        marFileReader.ExtractTo(marExtractedDirectory);
+
+        for (var i = 0; i < marFileReader.FilePointers.Length; i++)
+        {
+            using var mcmFileReader = new McmFileReader(new FileStream(Path.Combine(marExtractedDirectory, $"{i}.mcm"), FileMode.Open));
+            mcmFileReader.ExtractTo(Path.Combine(marExtractedDirectory, i.ToString()));
+
+            for (int j = 0; j < mcmFileReader.FilePointers.Length; j++)
+            {
+                try
+                {
+                    ExtractMmsFile(Path.Combine(marExtractedDirectory, i.ToString(), $"{j}.bin"));
+                }
+                catch (Exception)
+                {
+                    // Best effort: most chunks are not MMS descriptors.
+                }
+            }
+        }
+    }
+
+    // Renders every bitmap referenced by one MMS descriptor to PNG next to it.
+    private static void ExtractMmsFile(string inputFilePath)
+    {
+        using var mmsFileReader = new MmsFileReader(new FileStream(inputFilePath, FileMode.Open, FileAccess.Read));
+
+        var inputFileDirectory = Path.GetDirectoryName(inputFilePath)!;
+        var colorPalettes = new List<ColorPaletteFileReader>();
+
+        for (var i = 0; i < mmsFileReader.ColorPaletteFileIndexes.Length; i++)
+        {
+            var colorPaletteFile = Path.Combine(inputFileDirectory, "..", "..", mmsFileReader.ColorPaletteFileName, mmsFileReader.ColorPaletteFileIndexes[i].ToString(), "0.bin");
+
+            // The referenced palette MAR may not have been unpacked yet.
+            if (!File.Exists(colorPaletteFile))
+            {
+                ExtractMarFile(Path.Combine(inputFileDirectory, "..", "..", "..", mmsFileReader.ColorPaletteFileName));
+            }
+
+            // The reader materializes ColorTable in its constructor, so the table stays
+            // valid after the reader (and its stream) is disposed at end of iteration.
+            using var colorPaletteFileReader = new ColorPaletteFileReader(new FileStream(colorPaletteFile, FileMode.Open, FileAccess.Read));
+            colorPalettes.Add(colorPaletteFileReader);
+        }
+
+        for (var i = 0; i < mmsFileReader.BitmapFileIndexes.Length; i++)
+        {
+            try
+            {
+                var bitmapFile = Path.Combine(inputFileDirectory, "..", "..", mmsFileReader.BitmapFileName, mmsFileReader.BitmapFileIndexes[i].ToString(), "0.bin");
+
+                using var bitmapFileReader = new BitmapFileReader(new FileStream(bitmapFile, FileMode.Open, FileAccess.Read));
+
+                // Unrecognized header sizes report 0; fall back to the smallest sprite.
+                var width = bitmapFileReader.Width == 0 ? 16 : bitmapFileReader.Width;
+                var height = bitmapFileReader.Height == 0 ? 16 : bitmapFileReader.Height;
+
+                using var image = new Image<Rgba32>(width, height);
+
+                // Pixels are stored as 8x8 tiles laid out left-to-right, top-to-bottom.
+                var bitmapIndex = 0;
+                var gridX = 0;
+                var gridY = 0;
+
+                if (bitmapFileReader.ColorType == 0)
+                {
+                    // 16-color mode: two 4-bit palette indexes per byte (low nibble first
+                    // pixel is the high nibble here, matching the original decode order).
+                    // Multi-palette 16-color sprites are not supported yet — bail out.
+                    if (colorPalettes.Count > 1) return;
+
+                    while (bitmapIndex * 2 < width * height)
+                    {
+                        for (var y = 0; y < 8; y++)
+                        {
+                            for (var x = 0; x < 8; x += 2)
+                            {
+                                image[x + gridX * 8, y + gridY * 8] = colorPalettes[0].ColorTable[bitmapFileReader.BitmapColorIndexes[bitmapIndex] >> 4];
+                                image[x + 1 + gridX * 8, y + gridY * 8] = colorPalettes[0].ColorTable[bitmapFileReader.BitmapColorIndexes[bitmapIndex] & 0xF];
+                                bitmapIndex++;
+                            }
+                        }
+
+                        gridX++;
+
+                        if (gridX >= width / 8)
+                        {
+                            gridX = 0;
+                            gridY++;
+                        }
+                    }
+                }
+                else if (bitmapFileReader.ColorType == 1)
+                {
+                    // 256-color mode: one palette index per byte.
+                    while (bitmapIndex < width * height)
+                    {
+                        for (var y = 0; y < 8; y++)
+                        {
+                            for (var x = 0; x < 8; x++)
+                            {
+                                image[x + gridX * 8, y + gridY * 8] = colorPalettes[0].ColorTable[bitmapFileReader.BitmapColorIndexes[bitmapIndex++]];
+                            }
+                        }
+
+                        gridX++;
+
+                        if (gridX >= width / 8)
+                        {
+                            gridX = 0;
+                            gridY++;
+                        }
+                    }
+                }
+
+                image.SaveAsPng(Path.Combine(inputFileDirectory, $"{mmsFileReader.BitmapFileIndexes[i]}.png"));
+            }
+            catch (Exception)
+            {
+                // Best effort: skip bitmaps that are missing or fail to decode.
+            }
+        }
+    }
+}
diff --git a/global.json b/global.json
new file mode 100644
index 0000000..9e5e1fd
--- /dev/null
+++ b/global.json
@@ -0,0 +1,7 @@
+{
+ "sdk": {
+ "version": "6.0.0",
+ "rollForward": "latestMajor",
+ "allowPrerelease": true
+ }
+}
\ No newline at end of file