mirror of https://github.com/hykilpikonna/AquaDX
[+] Data convert script: Add ongeki
parent 5973b3bfe5
commit 02dc142eea
@@ -7,9 +7,12 @@ from pathlib import Path
 import orjson
 import xmltodict
 from hypy_utils import write
+from hypy_utils.logging_utils import setup_logger
 from hypy_utils.tqdm_utils import pmap
 from wand.image import Image

+log = setup_logger()
+

 def convert_path(file: Path):
     # Get path relative to source
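The module-level logger added here is what the later hunks swap in for print(). A minimal sketch of the pattern, assuming setup_logger() returns a standard logging.Logger (which the log.info calls in this commit imply):

    from hypy_utils.logging_utils import setup_logger

    log = setup_logger()
    log.info('Found 1234 xml files')  # stands in for the old print(f'Found {len(files)} xml files')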
@@ -40,7 +43,7 @@ def convert_one(file: Path):
     try:
         xml = xmltodict.parse(file.read_text())
     except Exception as e:
-        print(f'Error parsing {file}: {e}')
+        log.info(f'Error parsing {file}: {e}')
         return

     # There should only be one root element, expand it
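For context, convert_one round-trips each game XML file into JSON via xmltodict and orjson. A minimal sketch of that round trip on a made-up fragment (the element names are hypothetical, not real game data):

    import orjson
    import xmltodict

    sample = '<MusicData><name><id>11001</id><str>Example Song</str></name></MusicData>'

    xml = xmltodict.parse(sample)       # {'MusicData': {'name': {'id': '11001', 'str': 'Example Song'}}}
    root = next(iter(xml.values()))     # expand the single root element, as convert_one's comment describes
    print(orjson.dumps(root).decode())  # {"name":{"id":"11001","str":"Example Song"}}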
@@ -53,6 +56,9 @@ def convert_one(file: Path):
     if '@xmlns:xsd' in xml:
         del xml['@xmlns:xsd']

+    if target.exists():
+        log.info(f'Overwriting {target}')
+
     # Write json
     write(target, orjson.dumps(xml))

@@ -68,7 +74,7 @@ def convert_dds(file: Path):
         img.format = 'jpeg'
         img.save(filename=str(target.with_suffix('.png')))
     except Exception as e:
-        print(f'Error converting {file}: {e}')
+        log.info(f'Error converting {file}: {e}')
         return


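As a standalone reference for the image step, a minimal DDS-to-PNG conversion with Wand in the same spirit as convert_dds (this is a sketch, not the repo's function; it assumes an ImageMagick build with DDS support, and the file name is hypothetical):

    from pathlib import Path

    from wand.image import Image

    src_file = Path('UI_Jacket_001234.dds')  # hypothetical input file
    with Image(filename=str(src_file)) as img:
        img.format = 'png'
        img.save(filename=str(src_file.with_suffix('.png')))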
@@ -91,14 +97,8 @@ def get(d: dict, *keys: str):
     return None


-def combine_music_mai2():
-    # Read all music json files
-    music_files = list(dst.rglob('music/*.json'))
-    print(f'> Found {len(music_files)} music files')
-    jsons = [orjson.loads(f.read_text()) for f in music_files]
-
-    # Combine all music
-    combined = {d['name']['id']: {
+def convert_music_mai2(d: dict) -> (str, dict):
+    return d['name']['id'], {
         'name': d['name']['str'],
         'ver': d.get('version') or d.get('releaseTagName')['str'],
         'composer': d['artistName']['str'],
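This hunk turns combine_music_mai2 into a pure per-record converter: it takes one parsed music dict and returns an (id, summary) pair, leaving file discovery and writing to the caller. A self-contained sketch of that convention with a stand-in converter and made-up data (not the real game schema):

    def convert_music_example(d: dict) -> (str, dict):
        return d['name']['id'], {'name': d['name']['str'], 'composer': d['artistName']['str']}

    music_id, summary = convert_music_example({
        'name': {'id': '11001', 'str': 'Example Song'},
        'artistName': {'str': 'Example Artist'},
    })
    # music_id == '11001', summary == {'name': 'Example Song', 'composer': 'Example Artist'}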
@@ -111,44 +111,48 @@ def combine_music_mai2():
             'lv_id': n['musicLevelID'],
             'notes': int(n['maxNotes']),
         } for n in d['notesData']['Notes'] if n['isEnable'] != 'false']
-    } for d in jsons}
-
-    # Write combined music
-    write(dst / '00/all-music.json', orjson.dumps(combined))
-
-
-def combine_music_chu3():
-    # Read all music json files
-    music_files = list(dst.rglob('music/*.json'))
-    print(f'> Found {len(music_files)} music files')
-    jsons = [orjson.loads(f.read_text()) for f in music_files]
-
-    # Combine all music
-    combined = {}
-    for d in jsons:
-        combined[d['name']['id']] = {
-            'name': d['name']['str'],
-            'ver': d['releaseTagName']['str'],
-            'composer': d['artistName']['str'],
-            'genre': get(d, 'genreName.list.StringID.str'),
-            'lock': d['firstLock'],
-            'notes': [{
-                'lv': int(n['level']) + (int(n['levelDecimal']) / 100.0),
-                'designer': n.get('notesDesigner'),
-                'lv_id': n['type']['id'],
-            } for n in d['fumens']['MusicFumenData'] if n['enable'] != 'false']
-        }
-
-    # Write combined music
-    write(dst / '00/all-music.json', orjson.dumps(combined))
+    }
+
+
+def convert_music_chu3(d: dict) -> (str, dict):
+    return d['name']['id'], {
+        'name': d['name']['str'],
+        'ver': d['releaseTagName']['str'],
+        'composer': d['artistName']['str'],
+        'genre': get(d, 'genreName.list.StringID.str'),
+        'lock': d['firstLock'],
+        'notes': [{
+            'lv': int(n['level']) + (int(n['levelDecimal']) / 100.0),
+            'designer': n.get('notesDesigner'),
+            'lv_id': n['type']['id'],
+        } for n in d['fumens']['MusicFumenData'] if n['enable'] != 'false']
+    }
+
+
+def convert_music_ongeki(d: dict) -> (str, dict):
+    return d['Name']['id'], {
+        'name': d['Name']['str'],
+        'ver': d['VersionID']['id'],
+        'composer': d['ArtistName']['str'],
+        'genre': d['Genre']['str'],
+        'lock': f"{d['CostToUnlock']} {d['IsLockedAtTheBeginning']}",
+        'notes': [{
+            'lv': int(n['FumenConstIntegerPart']) + (int(n['FumenConstFractionalPart']) / 100.0),
+            'lv_id': i,
+        } for i, n in enumerate(d['FumenData']['FumenData']) if n['FumenFile']['path'] is not None],
+        'lunatic': d['IsLunatic']
+    }


 if __name__ == '__main__':
     agupa = argparse.ArgumentParser()
-    # Or chusan/App/data
+    # Source can be one of the following:
+    # - maimai/Package/Sinmai_Data/StreamingAssets
+    # - chusan/App/data
+    # - ongeki/package/mu3_Data/StreamingAssets/GameData
     agupa.add_argument('source', type=str, help='Package/Sinmai_Data/StreamingAssets directory')
     agupa.add_argument('destination', type=str, help='Directory to extract to')
-    agupa.add_argument('-g', '--game', type=str, help='Game to convert', default='mai2', choices=['mai2', 'chu3'])
+    agupa.add_argument('-g', '--game', type=str, help='Game to convert', default='mai2', choices=['mai2', 'chu3', 'ongeki'])
     args = agupa.parse_args()

     src = Path(args.source)
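Since all three converters now yield (id, summary) pairs of the same shape, the combined dst/00/all-music.json is keyed by music id regardless of game. A hypothetical ongeki entry for illustration; only the field names come from convert_music_ongeki above, the values are invented:

    combined = {
        '2000': {
            'name': 'Example Song',
            'ver': '7000',
            'composer': 'Example Artist',
            'genre': 'VARIETY',
            'lock': '0 false',
            'notes': [{'lv': 12.7, 'lv_id': 3}],
            'lunatic': 'false',
        },
    }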
@@ -165,7 +169,7 @@ if __name__ == '__main__':
         if not d.is_dir():
             continue

-        print(f'Relocating {d}')
+        log.info(f'Relocating {d}')
         for file in d.rglob('*.png'):
             id = ''.join(filter(str.isdigit, file.stem))
             shutil.move(file, d / f'{id}.png')
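The relocation step derives each target name by keeping only the digits of the original stem, so differently prefixed exports collapse onto a plain numeric id. A quick illustration (the stem is hypothetical):

    stem = 'UI_Jacket_001234'
    music_id = ''.join(filter(str.isdigit, stem))  # -> '001234'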
@@ -178,31 +182,36 @@ if __name__ == '__main__':
     # Assert that target directory does not exist
     if dst.exists():
         if input(f'{dst} already exists, delete? (y/n): ') == 'y':
-            print(f'Deleting {dst}')
+            log.info(f'Deleting {dst}')
             shutil.rmtree(dst)

     # Find all xml files in the source directory
     files = list(src.rglob('*.xml'))
-    print(f'Found {len(files)} xml files')
+    log.info(f'Found {len(files)} xml files')

     # Multithreaded map
     pmap(convert_one, files, desc='Converting', unit='file', chunksize=50)
-    print('> Finished converting')
+    log.info('> Finished converting')

     # Find all .dds files in the source A000 directory
     dds_files = list(src.rglob('*.dds'))
-    print(f'Found {len(dds_files)} dds files')
+    log.info(f'Found {len(dds_files)} dds files')

     # Convert and copy dds files (CPU-intensive)
     pmap(convert_dds, dds_files, desc='Converting DDS', unit='file', chunksize=50, max_workers=os.cpu_count() - 2)
-    print('> Finished converting DDS')
+    log.info('> Finished converting DDS')

     # Convert all music
-    print('Combining music')
-    if args.game == 'mai2':
-        combine_music_mai2()
-    if args.game == 'chu3':
-        combine_music_chu3()
+    log.info('Combining music')
+    music_files = list(dst.rglob('music/*.json'))
+    log.info(f'> Found {len(music_files)} music files')
+    jsons = [orjson.loads(f.read_text()) for f in music_files]
+
+    converter = {'mai2': convert_music_mai2, 'chu3': convert_music_chu3, 'ongeki': convert_music_ongeki}[args.game]
+    combined = {k: v for k, v in [converter(d) for d in jsons]}
+
+    # Write combined music
+    write(dst / '00/all-music.json', orjson.dumps(combined))
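The per-game if-branches collapse into a single dict dispatch, so supporting another game only needs one converter function plus one entry in choices. A self-contained sketch of the dispatch idiom with stand-in converters and made-up records (the comprehension above is equivalent to dict(converter(d) for d in jsons)):

    def conv_a(d: dict) -> (str, dict):
        return d['id'], {'name': d['name']}

    def conv_b(d: dict) -> (str, dict):
        return d['id'], {'name': d['name'].upper()}

    records = [{'id': '1', 'name': 'alpha'}, {'id': '2', 'name': 'beta'}]
    converter = {'a': conv_a, 'b': conv_b}['b']     # chosen the way args.game picks a game converter
    combined = dict(converter(d) for d in records)  # {'1': {'name': 'ALPHA'}, '2': {'name': 'BETA'}}

Invocation then looks something like 'python convert.py <StreamingAssets dir> <output dir> -g ongeki'; the script name convert.py is assumed here, not taken from the diff.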