improvements
blawar committed Feb 6, 2024
1 parent e9795e2 commit 5cf47cd
Showing 4 changed files with 155 additions and 59 deletions.
12 changes: 11 additions & 1 deletion Fs/IndexedFile.py
@@ -96,12 +96,23 @@ def setVersion(self, version):
def getVersion(self):
return self.version or ''

def getVersionNumber(self):
if self.version is None:
return None
try:
return int(self.version)
except (TypeError, ValueError):
return None

def isUpdate(self):
return self.titleId is not None and self.titleId.endswith('800')

def isDLC(self):
return self.titleId is not None and not self.isUpdate() and not self.titleId.endswith('000')

def isBase(self):
return self.titleId is not None and self.titleId.endswith('000')

def title(self):
if not self.titleId:
raise IOError('NSP no titleId set')
@@ -171,7 +182,6 @@ def move(self, forceNsp=False):
except BaseException as e:
if not str(e).startswith('too many bktr entries'):
raise



try:
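The new helpers encode the usual Switch titleId suffix convention: base title ids end in 000, update ids end in 800, and anything else is treated as DLC. A minimal sketch of how the three predicates partition ids (the ids below are hypothetical, for illustration only):

    # Hypothetical title ids:
    #   '0100000000010000' -> isBase()   (ends in '000')
    #   '0100000000010800' -> isUpdate() (ends in '800')
    #   '0100000000011001' -> isDLC()    (any other suffix)
    for titleId in ('0100000000010000', '0100000000010800', '0100000000011001'):
        kind = 'base' if titleId.endswith('000') else 'update' if titleId.endswith('800') else 'DLC'
        print(titleId, kind)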
14 changes: 11 additions & 3 deletions nut.py
@@ -476,6 +476,8 @@ def download(id):
parser.add_argument('--scrape-nsuid', help='Scrape eshop title by nsuid')
parser.add_argument('--scrape-shogun', nargs='*', help='Scrape ALL titles from shogun')
parser.add_argument('--scrape-shogun-missed', nargs='*', help='Scrape titles that are not advertised by shogun but in our database')
parser.add_argument('--scrape-shogun-refresh', action="store_true", help='Full refresh of already known title ids from shogun')
parser.add_argument('--scrape-shogun-refresh-quick', action="store_true", help='Quick refresh of already known title ids from shogun')
parser.add_argument('--scrape-shogun-delta', nargs='*', help='Scrape new titles from shogun')
parser.add_argument('--scrape-shogun-unnamed', nargs='*', help='Refresh missing DLC/Base metadata')
parser.add_argument('-E', '--get-edge-token', action="store_true", help='Get edge token')
@@ -606,7 +608,7 @@ def download(id):
if args.C:
for filePath in args.file:
try:
nut.compress(filePath, 21 if args.level is None else args.level, args.output)
nut.compress(filePath, 21 if args.level is None else args.level, args.output, copy = args.copy)

except BaseException as e:
Print.error(str(e))
@@ -828,9 +830,15 @@ def download(id):
if args.system_update:
cdn.downloadSystemUpdate()

if args.scrape_shogun_refresh:
nut.scrapeShogunThreaded(True, shogunList = False)

if args.scrape_shogun_refresh_quick:
nut.scrapeShogunThreaded(False, shogunList = False)

if args.scrape_shogun is not None:
if len(args.scrape_shogun) == 0:
nut.scrapeShogunThreaded(True)
nut.scrapeShogunThreaded(True, shogunList = True)
else:
nut.initTitles()
nut.initFiles()
@@ -961,7 +969,7 @@ def download(id):
if args.compress_all:
nut.initTitles()
nut.initFiles()
nut.compressAll(19 if args.level is None else args.level)
nut.compressAll(19 if args.level is None else args.level, copy = args.copy)

if args.decompress_all:
nut.decompressAll()
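Both new flags reuse the threaded scraper: --scrape-shogun-refresh calls nut.scrapeShogunThreaded(True, shogunList = False) to force a full re-scrape of every known title id, while --scrape-shogun-refresh-quick passes False so already-cached metadata is not re-fetched. Example invocations, assuming the usual entry point:

    python3 nut.py --scrape-shogun-refresh
    python3 nut.py --scrape-shogun-refresh-quick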
3 changes: 3 additions & 0 deletions nut/Titles.py
@@ -338,6 +338,9 @@ def save(fileName='titledb/titles.json', full=True):
def saveRegion(region, language):
saveTitlesJson(data(region, language), 'titledb/%s.%s.json' % (region, language))

def regionModified(region, language):
return os.path.getmtime('titledb/%s.%s.json' % (region, language))

class Queue:
def __init__(self):
self.queue = []
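regionModified exposes the mtime of a region's JSON dump; judging by the commented-out check in scrapeShogunWorker further down, it is meant for change detection against titledb/versions.json. A minimal sketch under that assumption (needsRescan is a hypothetical helper, not part of the commit):

    import os
    from nut import Titles

    def needsRescan(region, language):
        # Rescan only when the shared versions.json is newer than this region's dump.
        return Titles.regionModified(region, language) < os.path.getmtime('titledb/versions.json')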
185 changes: 130 additions & 55 deletions nut/__init__.py
@@ -16,6 +16,7 @@
from binascii import unhexlify as uhx
from contextlib import closing

import shutil
import colorama
import requests
import zstandard
@@ -139,8 +140,19 @@ def sortedFs(nca):
fs.sort(key=lambda x: x.offset)
return fs

def compress(filePath, compressionLevel=19, outputDir=None):
def compress(filePath, compressionLevel=19, outputDir=None, copy = False):
filePath = os.path.abspath(filePath)

if copy:
tmpFilePath = os.path.abspath(os.path.join(outputDir, os.path.basename(filePath)))

if filePath == tmpFilePath:
copy = False
else:
Print.info('copying %s -> %s' % (filePath, tmpFilePath))
shutil.copyfile(filePath, tmpFilePath)
filePath = tmpFilePath

CHUNK_SZ = 0x1000000

@@ -268,6 +280,10 @@ def compress(filePath, compressionLevel=19, outputDir=None):
f.write(buffer)

newNsp.close()

if copy:
os.unlink(tmpFilePath)

return nszPath
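With copy=True, compress now stages the source NSP into outputDir, compresses the staged copy, and unlinks it afterwards, so the original is read only once even on slow or remote storage. A minimal usage sketch (paths are hypothetical):

    import nut

    # Stage the NSP into /tmp/out, compress it there, then delete the staged copy.
    nszPath = nut.compress('/mnt/nas/game.nsp', 21, outputDir='/tmp/out', copy=True)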

def compressWorker(q, level, output, totalStatus):
@@ -310,7 +326,7 @@ def ganymede(config):
except BaseException:
raise

def compressAll(level=19):
def compressAll(level=19, copy = False):
initTitles()
initFiles()

@@ -718,6 +734,7 @@ def pullWorker(q, s):
Nsps.save()
except BaseException as e:
Print.error('FTP SYNC EXCEPTION: ' + str(e))
traceback.print_exc(file=sys.stdout)
# raise #TODO
s.add()
Print.info('thread exiting')
@@ -1232,11 +1249,23 @@ def extractNcaMeta(files = []):
for path, nsp in Nsps.files.items():
if not nsp.path.endswith('.nsp'): # and not nsp.path.endswith('.xci'):
continue

if nsp.isBase() and nsp.getVersionNumber() != 0:
continue

if nsp.isDLC():
continue

try:
if hasattr(nsp, 'extractedNcaMeta') and (nsp.extractedNcaMeta or nsp.extractedNcaMeta == 1) or '0100000000000816' in path:
# Print.info('skipping')
continue

title = nsp.title()

if title and not title.isActive(True):
continue

if hasCnmt(nsp.titleId, nsp.version):
continue

@@ -1264,6 +1293,8 @@ def extractNcaMeta(files = []):
try:
c += 1

Print.info('processing %s' % nsp.path)

nsp.open(path, 'rb')

if nsp.title().key == '':
@@ -1303,58 +1334,62 @@ def extractNcaMeta(files = []):
# print(ncaDataMap)

for n in nsp:
if not isinstance(n, Nca):
continue

ncaId = n._path.split('.')[0]
data = getNca(ncaId)

data.contentType = int(n.header.contentType)
data.isGameCard = n.header.isGameCard
data.cryptoType = n.header.cryptoType
data.keyIndex = n.header.keyIndex
data.size = n.header.size
data.titleId = n.header.titleId
data.contentIndex = n.header.contentIndex
data.sdkVersion = n.header.sdkVersion
data.cryptoType2 = n.header.cryptoType2
data.rightsId = n.header.rightsId

if data.rightsId == b'00000000000000000000000000000000':
data.rightsId = None
else:
data.rightsId = data.rightsId.decode()
try:
if not isinstance(n, Nca):
continue

if data.contentType == 1:
for p in n:
for m in p:
if not isinstance(m, Cnmt):
continue

cnmt = getCnmt(m.titleId, m.version)
cnmt.contentEntries = []
cnmt.metaEntries = []
cnmt.titleType = m.titleType
for e in m.contentEntries:
if not e.ncaId.upper() in ncaDataMap:
Print.info(ncaDataMap)
Print.info('nca missing: ' + e.ncaId.upper())
ncaId = n._path.split('.')[0]
data = getNca(ncaId)

data.contentType = int(n.header.contentType)
data.isGameCard = n.header.isGameCard
data.cryptoType = n.header.cryptoType
data.keyIndex = n.header.keyIndex
data.size = n.header.size
data.titleId = n.header.titleId
data.contentIndex = n.header.contentIndex
data.sdkVersion = n.header.sdkVersion
data.cryptoType2 = n.header.cryptoType2
data.rightsId = n.header.rightsId

if data.rightsId == b'00000000000000000000000000000000':
data.rightsId = None
else:
data.rightsId = data.rightsId.decode()

if data.contentType == 1:
for p in n:
for m in p:
if not isinstance(m, Cnmt):
continue
mapData = ncaDataMap[e.ncaId.upper()]
if mapData is not None and (mapData.buildId is not None):
cnmt.contentEntries.append({'ncaId': e.ncaId, 'type': e.type, 'buildId': mapData.buildId})
else:
cnmt.contentEntries.append({'ncaId': e.ncaId, 'type': e.type})

for e in m.metaEntries:
cnmt.metaEntries.append({'titleId': e.titleId, 'version': e.version, 'type': e.type, 'install': e.install})

cnmt.requiredSystemVersion = m.requiredSystemVersion
cnmt.requiredApplicationVersion = m.requiredApplicationVersion
cnmt.otherApplicationId = m.otherApplicationId
cnmt = getCnmt(m.titleId, m.version)
cnmt.contentEntries = []
cnmt.metaEntries = []
cnmt.titleType = m.titleType
for e in m.contentEntries:
if not e.ncaId.upper() in ncaDataMap:
Print.info(ncaDataMap)
Print.info('nca missing: ' + e.ncaId.upper())
continue
mapData = ncaDataMap[e.ncaId.upper()]
if mapData is not None and (mapData.buildId is not None):
cnmt.contentEntries.append({'ncaId': e.ncaId, 'type': e.type, 'buildId': mapData.buildId})
else:
cnmt.contentEntries.append({'ncaId': e.ncaId, 'type': e.type})

for e in m.metaEntries:
cnmt.metaEntries.append({'titleId': e.titleId, 'version': e.version, 'type': e.type, 'install': e.install})

cnmt.requiredSystemVersion = m.requiredSystemVersion
cnmt.requiredApplicationVersion = m.requiredApplicationVersion
cnmt.otherApplicationId = m.otherApplicationId

# print(str(data.__dict__))
except BaseException as e:
Print.info('exception: %s %s' % (path, str(e)))
continue

# print(str(data.__dict__))
Print.info('processed %s' % nsp.path)
nsp.extractedNcaMeta = True
except BaseException as e:
Print.info('exception: %s %s' % (path, str(e)))
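The restructured loop wraps each NCA in its own try/except, so one unreadable entry is logged and skipped instead of aborting metadata extraction for the whole NSP. The pattern in isolation (process and the sample data are hypothetical stand-ins):

    def process(n):
        if n == 2:
            raise ValueError('corrupt entry')
        print('ok', n)

    for n in (1, 2, 3):
        try:
            process(n)
        except BaseException as e:
            print('exception: %s' % str(e))  # log and continue with the next entry
            continue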
@@ -1398,27 +1433,63 @@ def scrapeShogun(force=False, region=None):
cdn.Shogun.scrapeTitles(region, force=force)
Titles.saveAll()

def scrapeShogunWorker(q, force = False, refresh = False):
def scrapeShogunWorker(q, bar, force = False, refresh = False, shogunList = True):
while True:
region = q.get()

if region is None:
break

try:
cdn.Shogun.scrapeTitles(region, force = force, refresh = refresh, save = False)
if shogunList == True:
cdn.Shogun.scrapeTitles(region, force = force, refresh = refresh, save = False)
else:
for language in Config.regionLanguages()[region]:
#if (Titles.regionModified(region, language) > os.path.getmtime('titledb/versions.json')):
# continue
#if ('%s.%s.json' % (region, language) ) in ['AR.en.json', 'AR.es.json', 'AT.de.json', 'BG.en.json', 'BR.en.json', 'BR.pt.json', 'CA.en.json', 'CL.en.json', 'CL.es.json', 'CN.zh.json', 'CO.en.json', 'CO.es.json', 'CY.en.json', 'CZ.en.json', 'DE.de.json', 'DK.en.json', 'EE.en.json', 'ES.es.json', 'FI.en.json', 'GR.en.json', 'HK.zh.json', 'HR.en.json', 'HU.en.json', 'IE.en.json', 'KR.ko.json', 'LT.en.json', 'LV.en.json', 'MT.en.json', 'MX.en.json', 'NL.nl.json', 'NO.en.json', 'NZ.en.json', 'PE.en.json', 'PE.es.json', 'PL.en.json', 'PT.pt.json', 'RO.en.json', 'RU.ru.json']:
# continue
Print.info('searching %s %s' % (region, language))

keys = []

for x in Titles.keys():
if not x.endswith('800'):
keys.append(x)
status = Status.create(len(keys), desc='searching %s %s' % (region, language), unit='')
for id in keys:
try:
l = cdn.Shogun.ids(id, region = region, language = language or 'en', force=(force or shogunList == False))
status.add(1)

if not l or len(l) == 0 or len(l['id_pairs']) == 0:
#Print.info('\tno nsuId\'s found')
pass
else:
#print(l)
for t in l['id_pairs']:
#print('\tnsuId: ' + str(t['id']))
#print(json.dumps(cdn.Shogun.scrapeTitle(t['id'], region=region, language=language, force=True).__dict__))
cdn.Shogun.scrapeTitle(t['id'], region=region, language=language, force=True)

except BaseException as e:
Print.info('shogun worker inner exception: ' + str(e))
traceback.print_exc(file=sys.stdout)
status.close()
Titles.saveRegion(region, language)
except BaseException as e:
Print.info('shogun worker exception: ' + str(e))
traceback.print_exc(file=sys.stdout)

q.task_done()
bar.add(1)

def scrapeShogunThreaded(force = False, refresh = False):
def scrapeShogunThreaded(force = False, refresh = False, shogunList = True):
initTitles()
initFiles()

scrapeThreads = []
numThreads = 4
numThreads = 8

if Config.reverse:
q = queue.LifoQueue()
@@ -1428,8 +1499,10 @@ def scrapeShogunThreaded(force = False, refresh = False):
for region in cdn.regions():
q.put(region)

bar = Status.create(q.qsize(), desc="Scanning shogun...", unit='')

for i in range(numThreads):
t = threading.Thread(target=scrapeShogunWorker, args=[q, force, refresh])
t = threading.Thread(target=scrapeShogunWorker, args=[q, bar, force, refresh, shogunList])
t.daemon = True
t.start()
scrapeThreads.append(t)
@@ -1447,6 +1520,8 @@ def scrapeShogunThreaded(force = False, refresh = False):
t.join()
Print.info('joined thread %d of %d' % (i, len(scrapeThreads)))

bar.close()

Print.info('saving titles')
Titles.save()
Print.info('titles saved')
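scrapeShogunThreaded now drives a shared Status bar from eight daemon workers draining a region queue, ticking the bar once per region whether or not scraping succeeded. A stripped-down sketch of the same fan-out pattern (print stands in for the real scrape call):

    import queue
    import threading

    def worker(q):
        while True:
            region = q.get()
            if region is None:          # sentinel: no more work
                q.task_done()
                break
            print('scraping', region)   # stand-in for cdn.Shogun.scrapeTitles(...)
            q.task_done()

    q = queue.Queue()
    for region in ('US', 'JP', 'GB'):
        q.put(region)

    threads = [threading.Thread(target=worker, args=[q], daemon=True) for _ in range(2)]
    for t in threads:
        t.start()
    for _ in threads:
        q.put(None)                     # one sentinel per worker
    for t in threads:
        t.join()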