Delete unused files

pull/19/head^2
Azalea 2024-03-04 21:18:17 -05:00
parent a7888a63fa
commit a781c2d665
6 changed files with 0 additions and 8512 deletions

.github/workflows/DATA (binary, vendored)

Binary file not shown.


@@ -1,26 +0,0 @@
name: AquaMai Build

on:
  push:
    branches: [ master ]

jobs:
  build:
    runs-on: windows-latest
    defaults:
      run:
        working-directory: ./AquaMai
    steps:
      - uses: actions/checkout@v4
      - name: Setup MSBuild Path
        uses: microsoft/setup-msbuild@v1.0.2
      - name: Decrypt DLL
        run: gpg -d --batch --passphrase "${{ secrets.DLL_PASSPHRASE }}" -o .\Libs\Assembly-CSharp.dll ..\.github\workflows\DATA
      - name: Build with MSBuild
        run: msbuild.exe .\AquaMai.csproj
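For context, the Decrypt DLL step implies the committed DATA blob is a symmetrically encrypted copy of Assembly-CSharp.dll. A minimal sketch of how such a blob could be produced; the local paths and passphrase handling are assumptions, not taken from this repo (and depending on the gpg version, --pinentry-mode loopback may be needed for batch passphrase input):

import subprocess

passphrase = 'example-passphrase'  # hypothetical; the value stored as secrets.DLL_PASSPHRASE

subprocess.run([
    'gpg', '--symmetric', '--batch', '--pinentry-mode', 'loopback',
    '--passphrase', passphrase,
    '-o', '.github/workflows/DATA',      # encrypted blob committed to the repo
    'AquaMai/Libs/Assembly-CSharp.dll',  # hypothetical local path to the game DLL
], check=True)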


@@ -1,43 +0,0 @@
# Data server for Aqua frontend
server
{
    listen 443 ssl;
    listen [::]:443 ssl;
    server_name aqua-data.example.com;

    # / should redirect to the actual website aquadx.hydev.org
    location / {
        return 301 https://example.com;
    }

    # /maimai should be a file server on /etc/nginx/aqua-data/maimai
    # These are generated using:
    # cd Package/Sinmai_Data/StreamingAssets/A000
    # mkdir -p /etc/nginx/aqua-data/maimai
    # python3 AquaDX/tools/data_convert.py .. /etc/nginx/aqua-data/maimai/meta
    # rm -rf MovieData SoundData
    # (Open AssetRipper and open folder Package/Sinmai_Data)
    # (Export all assets to /tmp/maimai)
    # cd /tmp/maimai/ExportedProject/Assets
    # find -name "*.meta" -delete -print
    # find -name "*.asset" -delete -print
    # cp -r assetbundle Texture2D Resources/common/sprites /etc/nginx/aqua-data/maimai
    # rm -rf /tmp/maimai
    location /maimai {
        root /etc/nginx/aqua-data;
        # Specify UTF-8 encoding
        charset utf-8;
        # CORS allow all
        add_header 'Access-Control-Allow-Origin' '*';
        add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
        autoindex on;
    }

    ssl_certificate /dev/null;
    ssl_certificate_key /dev/null;
}
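For context, once the tree above is generated, the /maimai location serves it as a plain autoindexed static file server with permissive CORS. A minimal sketch of a client fetch, assuming the example hostname above and a real certificate in place of the /dev/null placeholders:

import json
import urllib.request

# all-music.json is the combined metadata written by combine_music() below
url = 'https://aqua-data.example.com/maimai/meta/00/all-music.json'
with urllib.request.urlopen(url) as resp:
    music = json.load(resp)

print(len(music), 'songs')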

File diff suppressed because it is too large.


@@ -1,123 +0,0 @@
import argparse
import os
import shutil
from pathlib import Path

import orjson
import xmltodict
from hypy_utils import write
from hypy_utils.tqdm_utils import pmap


def convert_one(file: Path):
    # Get path relative to source
    rel = file.relative_to(src)

    # If path is one-level under StreamingAssets, ignore it (e.g. StreamingAssets/A000/Data.xml)
    if len(rel.parts) <= 2:
        return

    # Read xml
    xml = xmltodict.parse(file.read_text())

    # There should only be one root element, expand it
    assert len(xml) == 1, f'Expected 1 root element, got {len(xml)}'
    xml = xml[list(xml.keys())[0]]

    # Remove @xmlns:xsi and @xmlns:xsd
    if '@xmlns:xsi' in xml:
        del xml['@xmlns:xsi']
    if '@xmlns:xsd' in xml:
        del xml['@xmlns:xsd']

    # Generate target file path
    # Ignore the first segment of the relative path, and append to the destination
    # Also collapse the single-item directory into the filename
    # e.g. {src}/A000/music/music000001/Music.xml -> {dst}/music/000001.json
    target = dst / '/'.join(rel.parts[1:-2])
    file_id = ''.join(filter(str.isdigit, rel.parts[-2]))
    target = target / f'{file_id}.json'

    # Create directories if they don't exist
    target.parent.mkdir(parents=True, exist_ok=True)

    # Write json
    write(target, orjson.dumps(xml))


def combine_music():
    # Read all music json files
    music_files = list(dst.rglob('music/*.json'))
    print(f'> Found {len(music_files)} music files')
    jsons = [orjson.loads(f.read_text()) for f in music_files]

    # Combine all music
    combined = {d['name']['id']: {
        'name': d['name']['str'],
        'ver': int(d['version']),
        'composer': d['artistName']['str'],
        'genre': d['genreName']['str'],
        'bpm': int(d['bpm']),
        'lock': f"{d['lockType']} {d['subLockType']}",
        'notes': [{
            'lv': int(n['level']) + (int(n['levelDecimal']) / 10),
            'designer': n['notesDesigner']['str'],
            'lv_id': n['musicLevelID'],
            'notes': int(n['maxNotes']),
        } for n in d['notesData']['Notes'] if n['isEnable'] != 'false']
    } for d in jsons}

    # Write combined music
    write(dst / '00/all-music.json', orjson.dumps(combined))


if __name__ == '__main__':
    agupa = argparse.ArgumentParser()
    agupa.add_argument('source', type=str, help='Package/Sinmai_Data/StreamingAssets directory')
    agupa.add_argument('destination', type=str, help='Directory to extract to')
    args = agupa.parse_args()

    src = Path(args.source)
    dst = Path(args.destination)

    # Special post-convert command to relocate stuff
    if args.source == 'post-convert':
        ori = dst
        dst = dst.parent

        # In assetbundle/dir, move each XXX_{id}_XXX.png to assetbundle/dir/{id}.png
        for d in os.listdir(dst / 'assetbundle'):
            d = dst / 'assetbundle' / d
            if not d.is_dir():
                continue
            print(f'Relocating {d}')
            for file in d.rglob('*.png'):
                id = ''.join(filter(str.isdigit, file.stem))
                shutil.move(file, d / f'{id}.png')

        exit(0)

    # Assert that A000 exists in the source directory
    assert (src / 'A000').exists(), f'{src}/A000 does not exist'

    # Assert that target directory does not exist
    if dst.exists():
        if input(f'{dst} already exists, delete? (y/n): ') == 'y':
            print(f'Deleting {dst}')
            shutil.rmtree(dst)

    # Find all xml files in the source directory
    files = list(src.rglob('*.xml'))
    print(f'Found {len(files)} xml files')

    # Multithreaded map
    pmap(convert_one, files, desc='Converting', unit='file', chunksize=50)
    print('> Finished converting')

    # Convert all music
    print('Combining music')
    combine_music()
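For reference, a toy run of the path-collapsing mapping convert_one performs; the XML snippet and directory name here are made up for illustration:

import orjson
import xmltodict

# e.g. {src}/A000/music/music000001/Music.xml -> {dst}/music/000001.json
raw = '<MusicData><name><id>1</id><str>Example</str></name></MusicData>'
xml = xmltodict.parse(raw)
xml = xml[list(xml.keys())[0]]                         # expand the single root element
file_id = ''.join(filter(str.isdigit, 'music000001'))  # collapse the directory name -> '000001'
print(f'music/{file_id}.json ->', orjson.dumps(xml).decode())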


@@ -1,144 +0,0 @@
import argparse
from pathlib import Path
from typing import NamedTuple

import pandas as pd
import sqlglot
import xmltodict
from hypy_utils import write


def read_list(search_path: str, pattern: str, parse_fn) -> list:
    # Uses the global `path` (the A000 data folder) set in __main__
    search_path = path / search_path
    data = [parse_fn(xmltodict.parse(t.read_text('utf-8')))
            for t in list(search_path.glob(pattern))]
    Path('csv').mkdir(exist_ok=True, parents=True)
    pd.DataFrame(data).to_csv(f'csv/{search_path.name}.csv', index=False)
    return data


class Ticket(NamedTuple):
    id: int         # TicketData.name.id
    name: str       # TicketData.name.str
    credits: int    # TicketData.creditNum
    kind: str       # TicketData.ticketKind.str
    max: int        # TicketData.maxCount
    detail: str     # TicketData.detail
    eventId: int    # TicketData.ticketEvent.id
    eventName: str  # TicketData.ticketEvent.str


def parse_ticket(d: dict) -> Ticket:
    return Ticket(
        id=int(d['TicketData']['name']['id']),
        name=d['TicketData']['name']['str'],
        credits=int(d['TicketData']['creditNum']),
        kind=d['TicketData']['ticketKind']['str'],
        max=int(d['TicketData']['maxCount']),
        detail=d['TicketData']['detail'],
        eventId=int(d['TicketData']['ticketEvent']['id']),
        eventName=d['TicketData']['ticketEvent']['str']
    )


class Event(NamedTuple):
    id: int          # EventData.name.id
    type: int        # EventData.infoType
    detail: str      # EventData.name.str
    alwaysOpen: bool # EventData.alwaysOpen


def parse_event(d: dict) -> Event:
    return Event(
        id=int(d['EventData']['name']['id']),
        type=int(d['EventData']['infoType']),
        detail=d['EventData']['name']['str'],
        # bool() on a non-empty string like 'false' is always True, so parse explicitly
        alwaysOpen=parse_bool(d['EventData']['alwaysOpen'])
    )


def add_migration(f_name: str, mysql: str):
    (migration_path / 'mysql' / f_name).write_text(mysql)
    (migration_path / 'mariadb' / f_name).write_text(mysql)

    # Translate to sqlite
    sqlite = sqlglot.transpile(mysql, read='mysql', write='sqlite', pretty=True)
    (migration_path / 'sqlite' / f_name).write_text(';\n'.join(sqlite) + ';\n')


class Character(NamedTuple):
    id: int             # CharaData.name.id
    name: str           # CharaData.name.str
    color_id: int       # CharaData.color.id
    color_name: str     # CharaData.color.str
    genre_id: int       # CharaData.genre.id
    genre_name: str     # CharaData.genre.str
    is_copyright: bool  # CharaData.isCopyright
    disable: bool       # CharaData.disable


def parse_bool(s: str) -> bool:
    if s == 'true' or s == '1':
        return True
    if s == 'false' or s == '0':
        return False
    raise ValueError(f'Invalid boolean value: {s}')


def parse_character(d: dict) -> Character:
    return Character(
        id=int(d['CharaData']['name']['id']),
        name=d['CharaData']['name']['str'],
        color_id=int(d['CharaData']['color']['id']),
        color_name=d['CharaData']['color']['str'],
        genre_id=int(d['CharaData']['genre']['id']),
        genre_name=d['CharaData']['genre']['str'],
        is_copyright=parse_bool(d['CharaData']['isCopyright']),
        disable=parse_bool(d['CharaData']['disable'])
    )


if __name__ == '__main__':
    agupa = argparse.ArgumentParser(description='Convert maimai data to csv')
    agupa.add_argument('path', type=Path, help='Path to A000 data folder')
    args = agupa.parse_args()

    path = Path(args.path)
    src = Path(__file__).parent.parent

    tickets = read_list('ticket', '*/Ticket.xml', parse_ticket)
    events = read_list('event', '*/Event.xml', parse_event)
    characters = read_list('chara', '*/Chara.xml', parse_character)

    # Write incremental sql
    # ids = [int(v.split(",")[0]) for v in (Path(__file__).parent / 'maimai2_game_event.csv').read_text().splitlines()]
    # new_events = [e for e in events if e.id not in ids]
    # sql = "INSERT INTO `maimai2_game_event` (`id`, `end_date`, `start_date`, `type`, `enable`) VALUES \n" + \
    #       ",\n".join([f"({e.id}, '2029-01-01 00:00:00.000000', '2019-01-01 00:00:00.000000', {e.type}, '1')" for e in new_events])
    # sql += ";\n"
    # write('sql/maimai2_game_event.sql', sql)

    # Find the highest V{}__*.sql file in src/main/resources/db/migration/sqlite
    migration_path = src / 'src/main/resources/db/migration'
    last_sql_version = max([int(v.name[1:].split("__")[0]) for v in (migration_path / 'sqlite').glob('V*__*.sql')])
    last_sql_version = 248  # NB: hardcoded override; the computed value above is discarded
    print(f"Last sql version: {last_sql_version}")

    # Write ticket sql
    sql = """
CREATE TABLE `maimai2_game_ticket` (
    `id` bigint(20) NOT NULL,
    `name` varchar(255) NOT NULL,
    `credits` int(8) NOT NULL,
    `kind` varchar(255) NOT NULL,
    `max` int(16) NOT NULL,
    `detail` varchar(255) NOT NULL,
    `event_id` bigint(20) NOT NULL,
    `event_name` varchar(255) NOT NULL
);\n\n"""
    # NB: values are interpolated without SQL escaping; assumes no quotes in the data
    sql += "INSERT INTO `maimai2_game_ticket` (`id`, `name`, `credits`, `kind`, `max`, `detail`, `event_id`, `event_name`) VALUES \n" + \
           ",\n".join([f"({t.id}, '{t.name}', {t.credits}, '{t.kind}', {t.max}, '{t.detail}', {t.eventId}, '{t.eventName}')" for t in tickets])
    sql += ";\n"
    add_migration(f"V{last_sql_version + 1}__maimai2_tickets.sql", sql)