Commit 3ea0d85453 by libretroadmin, 2022-07-17 19:45:58 +02:00 (parent 9265b2189f)
16 changed files with 1937 additions and 15 deletions

View file

@@ -458,12 +458,12 @@ static cdfs_track_t* cdfs_open_cue_track(
}
else if (!strncasecmp(line, "TRACK", 5))
{
char *ptr = NULL;
unsigned track_number = 0;
const char *track = line + 5;
const char *track = line + 5;
cdfs_skip_spaces(&track);
sscanf(track, "%d", (int*)&track_number);
track_number = (unsigned)strtol(track, &ptr, 10);
while (*track && *track != ' ' && *track != '\n')
++track;
@@ -479,11 +479,8 @@ static cdfs_track_t* cdfs_open_cue_track(
sector_size = atoi(track + 6);
}
else
{
/* assume AUDIO */
else /* assume AUDIO */
sector_size = 2352;
}
}
else if (!strncasecmp(line, "INDEX", 5))
{
@@ -635,10 +632,6 @@ cdfs_track_t* cdfs_open_raw_track(const char* path)
}
}
}
else
{
/* unsupported file type */
}
return track;
}
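The first hunk above replaces sscanf("%d") with strtol() for reading the cue TRACK number; the same change is made in media_detect_cd.c further down. A minimal sketch of that parsing pattern, with the end pointer checked explicitly (illustrative only, not code from this commit):

#include <stdlib.h>

/* Hypothetical helper: parse an unsigned track number from a token such as
 * "01 MODE1/2352". Returns 1 when at least one digit was consumed. */
static int parse_track_number(const char *token, unsigned *out)
{
   char *end  = NULL;
   long value = strtol(token, &end, 10);

   if (end == token || value < 0)
      return 0; /* no digits consumed, or a negative number */

   *out = (unsigned)value;
   return 1;
}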

View file

@@ -0,0 +1,46 @@
#ifndef _PS4_DEFINES_H
#define _PS4_DEFINES_H
#define PS4_MAX_ORBISPADS 16
#define PS4_MAX_PAD_PORT_TYPES 3
#define ORBISPAD_L3 0x00000002
#define ORBISPAD_R3 0x00000004
#define ORBISPAD_OPTIONS 0x00000008
#define ORBISPAD_UP 0x00000010
#define ORBISPAD_RIGHT 0x00000020
#define ORBISPAD_DOWN 0x00000040
#define ORBISPAD_LEFT 0x00000080
#define ORBISPAD_L2 0x00000100
#define ORBISPAD_R2 0x00000200
#define ORBISPAD_L1 0x00000400
#define ORBISPAD_R1 0x00000800
#define ORBISPAD_TRIANGLE 0x00001000
#define ORBISPAD_CIRCLE 0x00002000
#define ORBISPAD_CROSS 0x00004000
#define ORBISPAD_SQUARE 0x00008000
#define ORBISPAD_TOUCH_PAD 0x00100000
#define ORBISPAD_INTERCEPTED 0x80000000
#define SceUID uint32_t
#define SceKernelStat OrbisKernelStat
#define SCE_KERNEL_PRIO_FIFO_DEFAULT 700
#define SCE_AUDIO_OUT_PORT_TYPE_MAIN 0
#define SCE_AUDIO_OUT_MODE_STEREO 1
#define SCE_MOUSE_BUTTON_PRIMARY 0x00000001
#define SCE_MOUSE_BUTTON_SECONDARY 0x00000002
#define SCE_MOUSE_BUTTON_OPTIONAL 0x00000004
#define SCE_MOUSE_BUTTON_INTERCEPTED 0x80000000
#define SCE_MOUSE_OPEN_PARAM_MERGED 0x01
#define SCE_MOUSE_PORT_TYPE_STANDARD 0
#define SCE_DBG_KEYBOARD_PORT_TYPE_STANDARD 0
#define SCE_USER_SERVICE_MAX_LOGIN_USERS 16
#define SCE_USER_SERVICE_USER_ID_INVALID 0xFFFFFFFF
#define SCE_ORBISPAD_ERROR_ALREADY_OPENED 0x80920004
#define SCE_PAD_PORT_TYPE_STANDARD 0
#define SCE_PAD_PORT_TYPE_SPECIAL 2
#define SCE_PAD_PORT_TYPE_REMOTE_CONTROL 16
#define SCE_KERNEL_PROT_CPU_RW 0x02
#define SCE_KERNEL_MAP_FIXED 0x10
#endif

View file

@@ -0,0 +1,203 @@
/* Copyright (C) 2022 The RetroArch team
*
* ---------------------------------------------------------------------------------------
* The following license statement only applies to this file (network_stream.h).
* ---------------------------------------------------------------------------------------
*
* Permission is hereby granted, free of charge,
* to any person obtaining a copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _LIBRETRO_SDK_NETWORK_STREAM_H
#define _LIBRETRO_SDK_NETWORK_STREAM_H
#include <stddef.h>
#include <stdint.h>
#include <boolean.h>
#include <retro_common_api.h>
RETRO_BEGIN_DECLS
enum
{
NETSTREAM_SEEK_SET = 0,
NETSTREAM_SEEK_CUR,
NETSTREAM_SEEK_END
};
typedef struct netstream
{
void *buf;
size_t size;
size_t used;
size_t pos;
} netstream_t;
/**
* netstream_open:
*
* @stream : Pointer to a network stream object.
* @buf : Pre-allocated buffer. Pass NULL to dynamically allocate a buffer.
* @size : Buffer size. Pass 0 for no pre-allocated/initial buffer.
* @used : Buffer bytes in use. Ignored for non pre-allocated buffers.
*
* Opens a network stream.
*
* Returns: true on success, false otherwise.
*/
bool netstream_open(netstream_t *stream, void *buf, size_t size, size_t used);
/**
* netstream_close:
*
* @stream : Pointer to a network stream object.
* @dealloc : Whether to deallocate/free the buffer or not.
*
* Closes a network stream.
*
*/
void netstream_close(netstream_t *stream, bool dealloc);
/**
* netstream_reset:
*
* @stream : Pointer to a network stream object.
*
* Resets a network stream to its initial position,
* discarding any used bytes in the process.
*
*/
void netstream_reset(netstream_t *stream);
/**
* netstream_truncate:
*
* @stream : Pointer to a network stream object.
* @used : Amount of bytes used.
*
* Truncates the network stream.
* Truncation can either extend or reduce the amount of bytes used.
*
* Returns: true on success, false otherwise.
*/
bool netstream_truncate(netstream_t *stream, size_t used);
/**
* netstream_data:
*
* @stream : Pointer to a network stream object.
* @data : Pointer to an object to store a reference of the stream's data.
* @len : Pointer to an object to store the amount of bytes in use.
*
* Gets the network stream's data.
*
*/
void netstream_data(netstream_t *stream, void **data, size_t *len);
/**
* netstream_tell:
*
* @stream : Pointer to a network stream object.
*
* Gets the network stream's current position.
*
* Returns: current value of the position indicator.
*/
size_t netstream_tell(netstream_t *stream);
/**
* netstream_seek:
*
* @stream : Pointer to a network stream object.
* @offset : Position's offset.
* @origin : Position used as reference for the offset.
*
* Sets the network stream's current position.
*
* Returns: true on success, false otherwise.
*/
bool netstream_seek(netstream_t *stream, long offset, int origin);
/**
* netstream_read:
*
* @stream : Pointer to a network stream object.
* @data : Pointer to a storage for data read from the network stream.
* @len : Amount of bytes to read. Pass 0 to read all remaining bytes.
*
* Reads raw data from the network stream.
*
* Returns: true on success, false otherwise.
*/
bool netstream_read(netstream_t *stream, void *data, size_t len);
/**
* netstream_read_(type):
*
* @stream : Pointer to a network stream object.
* @data : Pointer to a storage for data read from the network stream.
*
* Reads data from the network stream.
* Network byte order is always big endian.
*
* Returns: true on success, false otherwise.
*/
bool netstream_read_byte(netstream_t *stream, uint8_t *data);
bool netstream_read_word(netstream_t *stream, uint16_t *data);
bool netstream_read_dword(netstream_t *stream, uint32_t *data);
bool netstream_read_qword(netstream_t *stream, uint64_t *data);
#ifdef __STDC_IEC_559__
bool netstream_read_float(netstream_t *stream, float *data);
bool netstream_read_double(netstream_t *stream, double *data);
#endif
/**
* netstream_write:
*
* @stream : Pointer to a network stream object.
* @data : Data to write into the network stream.
* @len : Amount of bytes to write.
*
* Writes raw data into the network stream.
*
* Returns: true on success, false otherwise.
*/
bool netstream_write(netstream_t *stream, const void *data, size_t len);
/**
* netstream_write_(type):
*
* @stream : Pointer to a network stream object.
* @data : Data to write into the network stream.
*
* Writes data into the network stream.
* Network byte order is always big endian.
*
* Returns: true on success, false otherwise.
*/
bool netstream_write_byte(netstream_t *stream, uint8_t data);
bool netstream_write_word(netstream_t *stream, uint16_t data);
bool netstream_write_dword(netstream_t *stream, uint32_t data);
bool netstream_write_qword(netstream_t *stream, uint64_t data);
#ifdef __STDC_IEC_559__
bool netstream_write_float(netstream_t *stream, float data);
bool netstream_write_double(netstream_t *stream, double data);
#endif
RETRO_END_DECLS
#endif
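The header above only declares the API; below is a minimal usage sketch (illustrative, not part of this commit) that lets the stream allocate its own buffer and relies on the implementation in streams/network_stream.c further down:

#include <stdio.h>
#include <streams/network_stream.h>

int main(void)
{
   netstream_t stream;
   uint32_t magic   = 0;
   uint16_t version = 0;

   /* A NULL buffer and size 0 ask the stream to grow a buffer on demand. */
   if (!netstream_open(&stream, NULL, 0, 0))
      return 1;

   netstream_write_dword(&stream, 0xCAFEBABE); /* stored big endian */
   netstream_write_word(&stream, 2);

   netstream_seek(&stream, 0, NETSTREAM_SEEK_SET);
   netstream_read_dword(&stream, &magic);      /* converted back to host order */
   netstream_read_word(&stream, &version);

   printf("magic=%08X version=%u pos=%u\n",
         (unsigned)magic, (unsigned)version, (unsigned)netstream_tell(&stream));

   netstream_close(&stream, true);             /* true also frees the internal buffer */
   return 0;
}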

libco/ps3.S (Normal file, 55 lines)
View file

@@ -0,0 +1,55 @@
.globl .co_swap_asm
.globl co_swap_asm
.type .co_swap_asm, @function
.type co_swap_asm, @function
.co_swap_asm:
co_swap_asm:
mfcr 8
std 1,40(4)
mflr 9
std 14,72(4)
std 15,80(4)
std 16,88(4)
std 17,96(4)
std 18,104(4)
std 19,112(4)
std 20,120(4)
std 21,128(4)
std 22,136(4)
std 23,144(4)
std 24,152(4)
std 25,160(4)
std 26,168(4)
std 27,176(4)
std 28,184(4)
std 29,192(4)
std 30,200(4)
std 31,208(4)
std 9,32(4)
ld 7,32(3)
ld 1,40(3)
bl swap
trap
swap: stw 8,48(4)
lwz 6,48(3)
mtctr 7
ld 14,72(3)
ld 15,80(3)
ld 16,88(3)
ld 17,96(3)
ld 18,104(3)
ld 19,112(3)
ld 20,120(3)
ld 21,128(3)
ld 22,136(3)
ld 23,144(3)
ld 24,152(3)
ld 25,160(3)
ld 26,168(3)
ld 27,176(3)
ld 28,184(3)
ld 29,192(3)
ld 30,200(3)
ld 31,208(3)
mtcr 6
bctr

View file

@@ -149,8 +149,8 @@ bool media_detect_cd_info_cue(const char *path, media_detect_cd_info_t *info)
if (!string_is_empty(track))
{
unsigned track_number = 0;
sscanf(track, "%d", (int*)&track_number);
char *ptr = NULL;
unsigned track_number = (unsigned)strtol(track, &ptr, 10);
#ifdef MEDIA_CUE_PARSE_DEBUG
printf("Found track: %d\n", track_number);
fflush(stdout);
@@ -185,8 +185,8 @@ bool media_detect_cd_info_cue(const char *path, media_detect_cd_info_t *info)
if (!string_is_empty(index))
{
unsigned index_number = 0;
sscanf(index, "%d", (int*)&index_number);
char *ptr = NULL;
unsigned index_number = (unsigned)strtol(index, &ptr, 10);
if (index_number == 1)
{

View file

@@ -0,0 +1,41 @@
# Recreate libretro_core_options_intl.h using translations from Crowdin
name: Crowdin Translation Integration
on:
push:
branches:
- master
paths:
- 'intl/*/*'
jobs:
create_intl_file:
runs-on: ubuntu-latest
steps:
- name: Setup Python
uses: actions/setup-python@v2
- name: Checkout
uses: actions/checkout@v2
with:
persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal access token.
fetch-depth: 0 # otherwise, there would be errors pushing refs to the destination repository.
- name: Create intl file
shell: bash
run: |
python3 intl/crowdin_intl.py '<path/to/libretro_core_options.h directory>'
- name: Commit files
run: |
git config --local user.email "41898282+github-actions[bot]@users.noreply.github.com"
git config --local user.name "github-actions[bot]"
git add <path/to/libretro_core_options_intl.h file>
git commit -m "Recreate libretro_core_options_intl.h" -a
- name: GitHub Push
uses: ad-m/github-push-action@v0.6.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
branch: ${{ github.ref }}

View file

@@ -0,0 +1,41 @@
# Prepare source for Crowdin sync
name: Crowdin Upload Preparation
on:
push:
branches:
- master
paths:
- '<path/to/libretro_core_options.h file>'
jobs:
prepare_source_file:
runs-on: ubuntu-latest
steps:
- name: Setup Python
uses: actions/setup-python@v2
- name: Checkout
uses: actions/checkout@v2
with:
persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal access token.
fetch-depth: 0 # otherwise, there would be errors pushing refs to the destination repository.
- name: Crowdin Prep
shell: bash
run: |
python3 intl/crowdin_prep.py '<path/to/libretro_core_options.h directory>'
- name: Commit files
run: |
git config --local user.email "41898282+github-actions[bot]@users.noreply.github.com"
git config --local user.name "github-actions[bot]"
git add intl/*
git commit -m "Recreate translation source text files" -a
- name: GitHub Push
uses: ad-m/github-push-action@v0.6.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
branch: ${{ github.ref }}

View file

@@ -0,0 +1,3 @@
files:
- source: /intl/_us/*.json
translation: /intl/_%two_letters_code%/%original_file_name%

View file

@@ -0,0 +1,47 @@
Place 'crowdin.yml' and the 'intl' and '.github' folders, including their contents, into the root of the repo.
In '.github/workflows' are two files: 'crowdin_intl.yml' & 'crowdin_prep.yml'.
Each of those contains placeholders, which need to be replaced as follows:
<path/to/libretro_core_options.h directory>
-> replace with the path from the root of the repo to the directory containing
'libretro_core_options.h' (it is assumed that 'libretro_core_options.h' &
'libretro_core_options_intl.h' are in the same directory)
<path/to/libretro_core_options.h file>
-> replace with the full path from the root of the repo to the 'libretro_core_options.h' file
<path/to/libretro_core_options_intl.h file>
-> replace with the full path from the root of the repo to the 'libretro_core_options_intl.h' file
From the root of the repo run (using bash):
python3 intl/core_opt_translation.py '<path/to/libretro_core_options.h directory>'
(If python3 doesn't work, try just python)
Push changes to repo. Once merged, request Crowdin integration.
Crowdin integration:
On the project page, go to the Applications tab. Choose GitHub.
There are two options: connecting a GitHub account that has write/commit permissions to the repo,
or providing a GitHub token that unlocks these permissions.
Then add a repository; a new interface opens. Pick the repository as well as the branch you want to sync.
On the right, Crowdin will display the default name of the repository it will use for creating PRs.
Below, set the sync schedule and then save. With that, the synchronisation should be set up.
If there are still problems, you might need to manually modify the configuration (double-click on the branch in the lower frame).
Here's what the file paths should look like (the '/' at the start is very important!):
Source files path:
/intl/_us/*.json
Translated files path:
/intl/_%two_letters_code%/%original_file_name%
Once Crowdin successfully creates the PR & it has been merged, the automatically created branch can be deleted on GitHub.

View file

@@ -0,0 +1 @@
__pycache__

View file

@@ -0,0 +1,609 @@
#!/usr/bin/env python3
"""Core options text extractor
The purpose of this script is to set up & provide functions for automatic generation of 'libretro_core_options_intl.h'
from 'libretro_core_options.h' using translations from Crowdin.
Both v1 and v2 structs are supported. It is, however, recommended to convert v1 files to v2 using the included
'v1_to_v2_converter.py'.
Usage:
python3 path/to/core_opt_translation.py "path/to/where/libretro_core_options.h & libretro_core_options_intl.h/are"
This script will:
1.) create keywords for & extract the texts from libretro_core_options.h & save them into intl/_us/core_options.h
2.) do the same for any translations present in libretro_core_options_intl.h, saving those in their respective folders
"""
import core_option_regex as cor
import re
import os
import sys
import json
import urllib.request as req
import shutil
# for uploading translations to Crowdin, the Crowdin 'language id' is required
LANG_CODE_TO_ID = {'_ar': 'ar',
'_ast': 'ast',
'_chs': 'zh-CN',
'_cht': 'zh-TW',
'_cs': 'cs',
'_cy': 'cy',
'_da': 'da',
'_de': 'de',
'_el': 'el',
'_eo': 'eo',
'_es': 'es-ES',
'_fa': 'fa',
'_fi': 'fi',
'_fr': 'fr',
'_gl': 'gl',
'_he': 'he',
'_hu': 'hu',
'_id': 'id',
'_it': 'it',
'_ja': 'ja',
'_ko': 'ko',
'_nl': 'nl',
'_pl': 'pl',
'_pt_br': 'pt-BR',
'_pt_pt': 'pt-PT',
'_ru': 'ru',
'_sk': 'sk',
'_sv': 'sv-SE',
'_tr': 'tr',
'_uk': 'uk',
'_vn': 'vi'}
LANG_CODE_TO_R_LANG = {'_ar': 'RETRO_LANGUAGE_ARABIC',
'_ast': 'RETRO_LANGUAGE_ASTURIAN',
'_chs': 'RETRO_LANGUAGE_CHINESE_SIMPLIFIED',
'_cht': 'RETRO_LANGUAGE_CHINESE_TRADITIONAL',
'_cs': 'RETRO_LANGUAGE_CZECH',
'_cy': 'RETRO_LANGUAGE_WELSH',
'_da': 'RETRO_LANGUAGE_DANISH',
'_de': 'RETRO_LANGUAGE_GERMAN',
'_el': 'RETRO_LANGUAGE_GREEK',
'_eo': 'RETRO_LANGUAGE_ESPERANTO',
'_es': 'RETRO_LANGUAGE_SPANISH',
'_fa': 'RETRO_LANGUAGE_PERSIAN',
'_fi': 'RETRO_LANGUAGE_FINNISH',
'_fr': 'RETRO_LANGUAGE_FRENCH',
'_gl': 'RETRO_LANGUAGE_GALICIAN',
'_he': 'RETRO_LANGUAGE_HEBREW',
'_hu': 'RETRO_LANGUAGE_HUNGARIAN',
'_id': 'RETRO_LANGUAGE_INDONESIAN',
'_it': 'RETRO_LANGUAGE_ITALIAN',
'_ja': 'RETRO_LANGUAGE_JAPANESE',
'_ko': 'RETRO_LANGUAGE_KOREAN',
'_nl': 'RETRO_LANGUAGE_DUTCH',
'_pl': 'RETRO_LANGUAGE_POLISH',
'_pt_br': 'RETRO_LANGUAGE_PORTUGUESE_BRAZIL',
'_pt_pt': 'RETRO_LANGUAGE_PORTUGUESE_PORTUGAL',
'_ru': 'RETRO_LANGUAGE_RUSSIAN',
'_sk': 'RETRO_LANGUAGE_SLOVAK',
'_sv': 'RETRO_LANGUAGE_SWEDISH',
'_tr': 'RETRO_LANGUAGE_TURKISH',
'_uk': 'RETRO_LANGUAGE_UKRAINIAN',
'_us': 'RETRO_LANGUAGE_ENGLISH',
'_vn': 'RETRO_LANGUAGE_VIETNAMESE'}
# these are handled by RetroArch directly - no need to include them in core translations
ON_OFFS = {'"enabled"', '"disabled"', '"true"', '"false"', '"on"', '"off"'}
def remove_special_chars(text: str, char_set=0) -> str:
"""Removes special characters from a text.
:param text: String to be cleaned.
:param char_set: 0 -> remove all ASCII special chars except for '_' & 'space';
1 -> remove invalid chars from file names
:return: Clean text.
"""
command_chars = [chr(unicode) for unicode in tuple(range(0, 32)) + (127,)]
special_chars = ([chr(unicode) for unicode in tuple(range(33, 48)) + tuple(range(58, 65)) + tuple(range(91, 95))
+ (96,) + tuple(range(123, 127))],
('\\', '/', ':', '*', '?', '"', '<', '>', '|'))
res = text
for cm in command_chars:
res = res.replace(cm, '_')
for sp in special_chars[char_set]:
res = res.replace(sp, '_')
while res.startswith('_'):
res = res[1:]
while res.endswith('_'):
res = res[:-1]
return res
def clean_file_name(file_name: str) -> str:
"""Removes characters which might make file_name inappropriate for files on some OS.
:param file_name: File name to be cleaned.
:return: The clean file name.
"""
file_name = remove_special_chars(file_name, 1)
file_name = re.sub(r'__+', '_', file_name.replace(' ', '_'))
return file_name
def get_struct_type_name(decl: str) -> tuple:
""" Returns relevant parts of the struct declaration:
type, name of the struct and the language appendix, if present.
:param decl: The struct declaration matched by cor.p_type_name.
:return: Tuple, e.g.: ('retro_core_option_definition', 'option_defs_us', '_us')
"""
struct_match = cor.p_type_name.search(decl)
if struct_match:
if struct_match.group(3):
struct_type_name = struct_match.group(1, 2, 3)
return struct_type_name
elif struct_match.group(4):
struct_type_name = struct_match.group(1, 2, 4)
return struct_type_name
else:
struct_type_name = struct_match.group(1, 2)
return struct_type_name
else:
raise ValueError(f'No or incomplete struct declaration: {decl}!\n'
'Please make sure all structs are complete, including the type and name declaration.')
def is_viable_non_dupe(text: str, comparison) -> bool:
"""text must be longer than 2 ('""'), not 'NULL' and not in comparison.
:param text: String to be tested.
:param comparison: Dictionary or set to search for text in.
:return: bool
"""
return 2 < len(text) and text != 'NULL' and text not in comparison
def is_viable_value(text: str) -> bool:
"""text must be longer than 2 ('""'), not 'NULL' and text.lower() not in
{'"enabled"', '"disabled"', '"true"', '"false"', '"on"', '"off"'}.
:param text: String to be tested.
:return: bool
"""
return 2 < len(text) and text != 'NULL' and text.lower() not in ON_OFFS
def create_non_dupe(base_name: str, opt_num: int, comparison) -> str:
"""Makes sure base_name is not in comparison, and if it is it's renamed.
:param base_name: Name to check/make unique.
:param opt_num: Number of the option base_name belongs to, used in making it unique.
:param comparison: Dictionary or set to search for base_name in.
:return: Unique name.
"""
h = base_name
if h in comparison:
n = 0
h = h + '_O' + str(opt_num)
h_end = len(h)
while h in comparison:
h = h[:h_end] + '_' + str(n)
n += 1
return h
def get_texts(text: str) -> dict:
"""Extracts the strings, which are to be translated/are the translations,
from text and creates macro names for them.
:param text: The string to be parsed.
:return: Dictionary of the form { '_<lang>': { 'macro': 'string', ... }, ... }.
"""
# all structs: group(0) full struct, group(1) beginning, group(2) content
structs = cor.p_struct.finditer(text)
hash_n_string = {}
just_string = {}
for struct in structs:
struct_declaration = struct.group(1)
struct_type_name = get_struct_type_name(struct_declaration)
if 3 > len(struct_type_name):
lang = '_us'
else:
lang = struct_type_name[2]
if lang not in just_string:
hash_n_string[lang] = {}
just_string[lang] = set()
is_v2 = False
pre_name = ''
p = cor.p_info
if 'retro_core_option_v2_definition' == struct_type_name[0]:
is_v2 = True
elif 'retro_core_option_v2_category' == struct_type_name[0]:
pre_name = 'CATEGORY_'
p = cor.p_info_cat
struct_content = struct.group(2)
# 0: full option; 1: key; 2: description; 3: additional info; 4: key/value pairs
struct_options = cor.p_option.finditer(struct_content)
for opt, option in enumerate(struct_options):
# group 1: key
if option.group(1):
opt_name = pre_name + option.group(1)
# no special chars allowed in key
opt_name = remove_special_chars(opt_name).upper().replace(' ', '_')
else:
raise ValueError(f'No option name (key) found in struct {struct_type_name[1]} option {opt}!')
# group 2: description0
if option.group(2):
desc0 = option.group(2)
if is_viable_non_dupe(desc0, just_string[lang]):
just_string[lang].add(desc0)
m_h = create_non_dupe(re.sub(r'__+', '_', f'{opt_name}_LABEL'), opt, hash_n_string[lang])
hash_n_string[lang][m_h] = desc0
else:
raise ValueError(f'No label found in struct {struct_type_name[1]} option {option.group(1)}!')
# group 3: desc1, info0, info1, category
if option.group(3):
infos = option.group(3)
option_info = p.finditer(infos)
if is_v2:
desc1 = next(option_info).group(1)
if is_viable_non_dupe(desc1, just_string[lang]):
just_string[lang].add(desc1)
m_h = create_non_dupe(re.sub(r'__+', '_', f'{opt_name}_LABEL_CAT'), opt, hash_n_string[lang])
hash_n_string[lang][m_h] = desc1
last = None
m_h = None
for j, info in enumerate(option_info):
last = info.group(1)
if is_viable_non_dupe(last, just_string[lang]):
just_string[lang].add(last)
m_h = create_non_dupe(re.sub(r'__+', '_', f'{opt_name}_INFO_{j}'), opt,
hash_n_string[lang])
hash_n_string[lang][m_h] = last
if last in just_string[lang]: # category key should not be translated
hash_n_string[lang].pop(m_h)
just_string[lang].remove(last)
else:
for j, info in enumerate(option_info):
gr1 = info.group(1)
if is_viable_non_dupe(gr1, just_string[lang]):
just_string[lang].add(gr1)
m_h = create_non_dupe(re.sub(r'__+', '_', f'{opt_name}_INFO_{j}'), opt,
hash_n_string[lang])
hash_n_string[lang][m_h] = gr1
else:
raise ValueError(f'Too few arguments in struct {struct_type_name[1]} option {option.group(1)}!')
# group 4:
if option.group(4):
for j, kv_set in enumerate(cor.p_key_value.finditer(option.group(4))):
set_key, set_value = kv_set.group(1, 2)
if not is_viable_value(set_value):
if not is_viable_value(set_key):
continue
set_value = set_key
# re.fullmatch(r'(?:[+-][0-9]+)+', value[1:-1])
if set_value not in just_string[lang] and not re.sub(r'[+-]', '', set_value[1:-1]).isdigit():
clean_key = set_key.encode('ascii', errors='ignore').decode('unicode-escape')[1:-1]
clean_key = remove_special_chars(clean_key).upper().replace(' ', '_')
m_h = create_non_dupe(re.sub(r'__+', '_', f"OPTION_VAL_{clean_key}"), opt, hash_n_string[lang])
hash_n_string[lang][m_h] = set_value
just_string[lang].add(set_value)
return hash_n_string
def create_msg_hash(intl_dir_path: str, core_name: str, keyword_string_dict: dict) -> dict:
"""Creates '<core_name>.h' files in 'intl/_<lang>/' containing the macro name & string combinations.
:param intl_dir_path: Path to the intl directory.
:param core_name: Name of the core, used for naming the files.
:param keyword_string_dict: Dictionary of the form { '_<lang>': { 'macro': 'string', ... }, ... }.
:return: Dictionary of the form { '_<lang>': 'path/to/file (./intl/_<lang>/<core_name>.h)', ... }.
"""
files = {}
for localisation in keyword_string_dict:
path = os.path.join(intl_dir_path, localisation) # intl/_<lang>
files[localisation] = os.path.join(path, core_name + '.h') # intl/_<lang>/<core_name>.h
if not os.path.exists(path):
os.makedirs(path)
with open(files[localisation], 'w', encoding='utf-8') as crowdin_file:
out_text = ''
for keyword in keyword_string_dict[localisation]:
out_text = f'{out_text}{keyword} {keyword_string_dict[localisation][keyword]}\n'
crowdin_file.write(out_text)
return files
def h2json(file_paths: dict) -> dict:
"""Converts .h files pointed to by file_paths into .jsons.
:param file_paths: Dictionary of the form { '_<lang>': 'path/to/file (./intl/_<lang>/<core_name>.h)', ... }.
:return: Dictionary of the form { '_<lang>': 'path/to/file (./intl/_<lang>/<core_name>.json)', ... }.
"""
jsons = {}
for file_lang in file_paths:
jsons[file_lang] = file_paths[file_lang][:-2] + '.json'
p = cor.p_masked
with open(file_paths[file_lang], 'r+', encoding='utf-8') as h_file:
text = h_file.read()
result = p.finditer(text)
messages = {}
for msg in result:
key, val = msg.group(1, 2)
if key not in messages:
if key and val:
# unescape & remove "\n"
messages[key] = re.sub(r'"\s*(?:(?:/\*(?:.|[\r\n])*?\*/|//.*[\r\n]+)\s*)*"',
'\\\n', val[1:-1].replace('\\\"', '"'))
else:
print(f"DUPLICATE KEY in {file_paths[file_lang]}: {key}")
with open(jsons[file_lang], 'w', encoding='utf-8') as json_file:
json.dump(messages, json_file, indent=2)
return jsons
def json2h(intl_dir_path: str, json_file_path: str, core_name: str) -> None:
"""Converts .json file in json_file_path into an .h ready to be included in C code.
:param intl_dir_path: Path to the intl directory.
:param json_file_path: Base path of translation .json.
:param core_name: Name of the core, required for naming the files.
:return: None
"""
h_filename = os.path.join(json_file_path, core_name + '.h')
json_filename = os.path.join(json_file_path, core_name + '.json')
file_lang = os.path.basename(json_file_path).upper()
if os.path.basename(json_file_path).lower() == '_us':
print(' skipped')
return
p = cor.p_masked
def update(s_messages, s_template, s_source_messages):
translation = ''
template_messages = p.finditer(s_template)
for tp_msg in template_messages:
old_key = tp_msg.group(1)
if old_key in s_messages and s_messages[old_key] != s_source_messages[old_key]:
tl_msg_val = s_messages[old_key]
tl_msg_val = tl_msg_val.replace('"', '\\\"').replace('\n', '') # escape
translation = ''.join((translation, '#define ', old_key, file_lang, f' "{tl_msg_val}"\n'))
else: # Remove English duplicates and non-translatable strings
translation = ''.join((translation, '#define ', old_key, file_lang, ' NULL\n'))
return translation
with open(os.path.join(intl_dir_path, '_us', core_name + '.h'), 'r', encoding='utf-8') as template_file:
template = template_file.read()
with open(os.path.join(intl_dir_path, '_us', core_name + '.json'), 'r+', encoding='utf-8') as source_json_file:
source_messages = json.load(source_json_file)
with open(json_filename, 'r+', encoding='utf-8') as json_file:
messages = json.load(json_file)
new_translation = update(messages, template, source_messages)
with open(h_filename, 'w', encoding='utf-8') as h_file:
h_file.seek(0)
h_file.write(new_translation)
h_file.truncate()
return
def get_crowdin_client(dir_path: str) -> str:
"""Makes sure the Crowdin CLI client is present. If it isn't, it is fetched & extracted.
:return: The path to 'crowdin-cli.jar'.
"""
jar_name = 'crowdin-cli.jar'
jar_path = os.path.join(dir_path, jar_name)
if not os.path.isfile(jar_path):
print('Downloading crowdin-cli.jar')
crowdin_cli_file = os.path.join(dir_path, 'crowdin-cli.zip')
crowdin_cli_url = 'https://downloads.crowdin.com/cli/v3/crowdin-cli.zip'
req.urlretrieve(crowdin_cli_url, crowdin_cli_file)
import zipfile
with zipfile.ZipFile(crowdin_cli_file, 'r') as zip_ref:
jar_dir = zip_ref.namelist()[0]
for file in zip_ref.namelist():
if file.endswith(jar_name):
jar_file = file
break
zip_ref.extract(jar_file)
os.rename(jar_file, jar_path)
os.remove(crowdin_cli_file)
shutil.rmtree(jar_dir)
return jar_path
def create_intl_file(intl_file_path: str, intl_dir_path: str, text: str, core_name: str, file_path: str) -> None:
"""Creates 'libretro_core_options_intl.h' from Crowdin translations.
:param intl_file_path: Path to 'libretro_core_options_intl.h'
:param intl_dir_path: Path to the intl directory.
:param text: Content of the 'libretro_core_options.h' being translated.
:param core_name: Name of the core. Needed to identify the files to pull the translations from.
:param file_path: Path to the '<core name>_us.h' file, containing the original English texts.
:return: None
"""
msg_dict = {}
lang_up = ''
def replace_pair(pair_match):
"""Replaces a key-value-pair of an option with the macros corresponding to the language.
:param pair_match: The re match object representing the key-value-pair block.
:return: Replacement string.
"""
offset = pair_match.start(0)
if pair_match.group(1): # key
if pair_match.group(2) in msg_dict: # value
val = msg_dict[pair_match.group(2)] + lang_up
elif pair_match.group(1) in msg_dict: # use key if value not viable (e.g. NULL)
val = msg_dict[pair_match.group(1)] + lang_up
else:
return pair_match.group(0)
else:
return pair_match.group(0)
res = pair_match.group(0)[:pair_match.start(2) - offset] + val \
+ pair_match.group(0)[pair_match.end(2) - offset:]
return res
def replace_info(info_match):
"""Replaces the 'additional strings' of an option with the macros corresponding to the language.
:param info_match: The re match object representing the 'additional strings' block.
:return: Replacement string.
"""
offset = info_match.start(0)
if info_match.group(1) in msg_dict:
res = info_match.group(0)[:info_match.start(1) - offset] + \
msg_dict[info_match.group(1)] + lang_up + \
info_match.group(0)[info_match.end(1) - offset:]
return res
else:
return info_match.group(0)
def replace_option(option_match):
"""Replaces strings within an option
'{ "opt_key", "label", "additional strings", ..., { {"key", "value"}, ... }, ... }'
within a struct with the macros corresponding to the language:
'{ "opt_key", MACRO_LABEL, MACRO_STRINGS, ..., { {"key", MACRO_VALUE}, ... }, ... }'
:param option_match: The re match object representing the option.
:return: Replacement string.
"""
# label
offset = option_match.start(0)
if option_match.group(2):
res = option_match.group(0)[:option_match.start(2) - offset] + msg_dict[option_match.group(2)] + lang_up
else:
return option_match.group(0)
# additional block
if option_match.group(3):
res = res + option_match.group(0)[option_match.end(2) - offset:option_match.start(3) - offset]
new_info = p.sub(replace_info, option_match.group(3))
res = res + new_info
else:
return res + option_match.group(0)[option_match.end(2) - offset:]
# key-value-pairs
if option_match.group(4):
res = res + option_match.group(0)[option_match.end(3) - offset:option_match.start(4) - offset]
new_pairs = cor.p_key_value.sub(replace_pair, option_match.group(4))
res = res + new_pairs + option_match.group(0)[option_match.end(4) - offset:]
else:
res = res + option_match.group(0)[option_match.end(3) - offset:]
return res
with open(file_path, 'r+', encoding='utf-8') as template: # intl/_us/<core_name>.h
masked_msgs = cor.p_masked.finditer(template.read())
for msg in masked_msgs:
msg_dict[msg.group(2)] = msg.group(1)
with open(intl_file_path, 'r', encoding='utf-8') as intl: # libretro_core_options_intl.h
in_text = intl.read()
intl_start = re.search(re.escape('/*\n'
' ********************************\n'
' * Core Option Definitions\n'
' ********************************\n'
'*/\n'), in_text)
if intl_start:
out_txt = in_text[:intl_start.end(0)]
else:
intl_start = re.search(re.escape('#ifdef __cplusplus\n'
'extern "C" {\n'
'#endif\n'), in_text)
out_txt = in_text[:intl_start.end(0)]
for folder in os.listdir(intl_dir_path): # intl/_*
if os.path.isdir(os.path.join(intl_dir_path, folder)) and folder.startswith('_')\
and folder != '_us' and folder != '__pycache__':
translation_path = os.path.join(intl_dir_path, folder, core_name + '.h') # <core_name>_<lang>.h
# all structs: group(0) full struct, group(1) beginning, group(2) content
struct_groups = cor.p_struct.finditer(text)
lang_up = folder.upper()
lang_low = folder.lower()
out_txt = out_txt + f'/* {LANG_CODE_TO_R_LANG[lang_low]} */\n\n' # /* RETRO_LANGUAGE_NAME */
with open(translation_path, 'r+', encoding='utf-8') as f_in: # <core name>.h
out_txt = out_txt + f_in.read() + '\n'
for construct in struct_groups:
declaration = construct.group(1)
struct_type_name = get_struct_type_name(declaration)
if 3 > len(struct_type_name): # no language specifier
new_decl = re.sub(re.escape(struct_type_name[1]), struct_type_name[1] + lang_low, declaration)
else:
new_decl = re.sub(re.escape(struct_type_name[2]), lang_low, declaration)
if '_us' != struct_type_name[2]:
continue
p = cor.p_info
if 'retro_core_option_v2_category' == struct_type_name[0]:
p = cor.p_info_cat
offset_construct = construct.start(0)
start = construct.end(1) - offset_construct
end = construct.start(2) - offset_construct
out_txt = out_txt + new_decl + construct.group(0)[start:end]
content = construct.group(2)
new_content = cor.p_option.sub(replace_option, content)
start = construct.end(2) - offset_construct
out_txt = out_txt + new_content + construct.group(0)[start:] + '\n'
if 'retro_core_option_v2_definition' == struct_type_name[0]:
out_txt = out_txt + f'struct retro_core_options_v2 options{lang_low}' \
' = {\n' \
f' option_cats{lang_low},\n' \
f' option_defs{lang_low}\n' \
'};\n\n'
# shutil.rmtree(JOINER.join((intl_dir_path, folder)))
with open(intl_file_path, 'w', encoding='utf-8') as intl:
intl.write(out_txt + '\n#ifdef __cplusplus\n'
'}\n#endif\n'
'\n#endif')
return
# -------------------- MAIN -------------------- #
if __name__ == '__main__':
#
try:
if os.path.isfile(sys.argv[1]):
_temp = os.path.dirname(sys.argv[1])
else:
_temp = sys.argv[1]
while _temp.endswith('/') or _temp.endswith('\\'):
_temp = _temp[:-1]
TARGET_DIR_PATH = _temp
except IndexError:
TARGET_DIR_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
print("No path provided, assuming parent directory:\n" + TARGET_DIR_PATH)
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
H_FILE_PATH = os.path.join(TARGET_DIR_PATH, 'libretro_core_options.h')
INTL_FILE_PATH = os.path.join(TARGET_DIR_PATH, 'libretro_core_options_intl.h')
_core_name = 'core_options'
try:
print('Getting texts from libretro_core_options.h')
with open(H_FILE_PATH, 'r+', encoding='utf-8') as _h_file:
_main_text = _h_file.read()
_hash_n_str = get_texts(_main_text)
_files = create_msg_hash(DIR_PATH, _core_name, _hash_n_str)
_source_jsons = h2json(_files)
except Exception as e:
print(e)
print('Getting texts from libretro_core_options_intl.h')
with open(INTL_FILE_PATH, 'r+', encoding='utf-8') as _intl_file:
_intl_text = _intl_file.read()
_hash_n_str_intl = get_texts(_intl_text)
_intl_files = create_msg_hash(DIR_PATH, _core_name, _hash_n_str_intl)
_intl_jsons = h2json(_intl_files)
print('\nAll done!')

View file

@@ -0,0 +1,95 @@
import re
# 0: full struct; 1: up to & including first []; 2: content between first {}
p_struct = re.compile(r'(struct\s*[a-zA-Z0-9_\s]+\[])\s*'
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+)\s*)*'
r'=\s*' # =
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+)\s*)*'
r'{((?:.|[\r\n])*?)\{\s*NULL,\s*NULL,\s*NULL\s*(?:.|[\r\n])*?},?(?:.|[\r\n])*?};') # captures the full struct, its beginning and its content
# 0: type name[]; 1: type; 2: name
p_type_name = re.compile(r'(retro_core_option_[a-zA-Z0-9_]+)\s*'
r'(option_cats([a-z_]{0,8})|option_defs([a-z_]{0,8}))\s*\[]')
# 0: full option; 1: key; 2: description; 3: additional info; 4: key/value pairs
p_option = re.compile(r'{\s*' # opening braces
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(\".*?\"|' # key start; group 1
r'[a-zA-Z0-9_]+\s*\((?:.|[\r\n])*?\)|'
r'[a-zA-Z0-9_]+\s*\[(?:.|[\r\n])*?]|'
r'[a-zA-Z0-9_]+\s*\".*?\")\s*' # key end
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(\".*?\")\s*' # description; group 2
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'((?:' # group 3
r'(?:NULL|\"(?:.|[\r\n])*?\")\s*' # description in category, info, info in category, category
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',?\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r')+)'
r'(?:' # defs only start
r'{\s*' # opening braces
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'((?:' # key/value pairs start; group 4
r'{\s*' # opening braces
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(?:NULL|\".*?\")\s*' # option key
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(?:NULL|\".*?\")\s*' # option value
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'}\s*' # closing braces
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',?\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r')*)' # key/value pairs end
r'}\s*' # closing braces
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',?\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(?:' # defaults start
r'(?:NULL|\".*?\")\s*' # default value
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',?\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r')*' # defaults end
r')?' # defs only end
r'},') # closing braces
# analyse option group 3
p_info = re.compile(r'(NULL|\"(?:.|[\r\n])*?\")\s*' # description in category, info, info in category, category
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',')
p_info_cat = re.compile(r'(NULL|\"(?:.|[\r\n])*?\")')
# analyse option group 4
p_key_value = re.compile(r'{\s*' # opening braces
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(NULL|\".*?\")\s*' # option key; 1
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(NULL|\".*?\")\s*' # option value; 2
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'}')
p_masked = re.compile(r'([A-Z_][A-Z0-9_]+)\s*(\"(?:"\s*"|\\\s*|.)*\")')
p_intl = re.compile(r'(struct retro_core_option_definition \*option_defs_intl\[RETRO_LANGUAGE_LAST]) = {'
r'((?:.|[\r\n])*?)};')
p_set = re.compile(r'static INLINE void libretro_set_core_options\(retro_environment_t environ_cb\)'
r'(?:.|[\r\n])*?};?\s*#ifdef __cplusplus\s*}\s*#endif')
p_yaml = re.compile(r'"project_id": "[0-9]+".*\s*'
r'"api_token": "([a-zA-Z0-9]+)".*\s*'
r'"base_path": "\./intl".*\s*'
r'"base_url": "https://api\.crowdin\.com".*\s*'
r'"preserve_hierarchy": true.*\s*'
r'"files": \[\s*'
r'\{\s*'
r'"source": "/_us/\*\.json",.*\s*'
r'"translation": "/_%two_letters_code%/%original_file_name%",.*\s*'
r'"skip_untranslated_strings": true.*\s*'
r'},\s*'
r']')

View file

@@ -0,0 +1,43 @@
#!/usr/bin/env python3
import core_opt_translation as t
if __name__ == '__main__':
try:
if t.os.path.isfile(t.sys.argv[1]):
_temp = t.os.path.dirname(t.sys.argv[1])
else:
_temp = t.sys.argv[1]
while _temp.endswith('/') or _temp.endswith('\\'):
_temp = _temp[:-1]
TARGET_DIR_PATH = _temp
except IndexError:
TARGET_DIR_PATH = t.os.path.dirname(t.os.path.dirname(t.os.path.realpath(__file__)))
print("No path provided, assuming parent directory:\n" + TARGET_DIR_PATH)
DIR_PATH = t.os.path.dirname(t.os.path.realpath(__file__))
H_FILE_PATH = t.os.path.join(TARGET_DIR_PATH, 'libretro_core_options.h')
INTL_FILE_PATH = t.os.path.join(TARGET_DIR_PATH, 'libretro_core_options_intl.h')
_core_name = 'core_options'
_core_name = t.clean_file_name(_core_name)
print('Getting texts from libretro_core_options.h')
with open(H_FILE_PATH, 'r+', encoding='utf-8') as _h_file:
_main_text = _h_file.read()
_hash_n_str = t.get_texts(_main_text)
_files = t.create_msg_hash(DIR_PATH, _core_name, _hash_n_str)
print('Converting translations *.json to *.h:')
for _folder in t.os.listdir(DIR_PATH):
if t.os.path.isdir(t.os.path.join(DIR_PATH, _folder))\
and _folder.startswith('_')\
and _folder != '__pycache__':
print(_folder)
t.json2h(DIR_PATH, t.os.path.join(DIR_PATH, _folder), _core_name)
print('Constructing libretro_core_options_intl.h')
t.create_intl_file(INTL_FILE_PATH, DIR_PATH, _main_text, _core_name, _files['_us'])
print('\nAll done!')

View file

@@ -0,0 +1,34 @@
#!/usr/bin/env python3
import core_opt_translation as t
if __name__ == '__main__':
_core_name = 'core_options'
try:
if t.os.path.isfile(t.sys.argv[1]):
_temp = t.os.path.dirname(t.sys.argv[1])
else:
_temp = t.sys.argv[1]
while _temp.endswith('/') or _temp.endswith('\\'):
_temp = _temp[:-1]
TARGET_DIR_PATH = _temp
except IndexError:
TARGET_DIR_PATH = t.os.path.dirname(t.os.path.dirname(t.os.path.realpath(__file__)))
print("No path provided, assuming parent directory:\n" + TARGET_DIR_PATH)
DIR_PATH = t.os.path.dirname(t.os.path.realpath(__file__))
H_FILE_PATH = t.os.path.join(TARGET_DIR_PATH, 'libretro_core_options.h')
_core_name = t.clean_file_name(_core_name)
print('Getting texts from libretro_core_options.h')
with open(H_FILE_PATH, 'r+', encoding='utf-8') as _h_file:
_main_text = _h_file.read()
_hash_n_str = t.get_texts(_main_text)
_files = t.create_msg_hash(DIR_PATH, _core_name, _hash_n_str)
_source_jsons = t.h2json(_files)
print('\nAll done!')

View file

@@ -0,0 +1,459 @@
#!/usr/bin/env python3
"""Core options v1 to v2 converter
Just run this script as follows to convert 'libretro_core_options.h' & 'libretro_core_options_intl.h' to v2:
python3 "/path/to/v1_to_v2_converter.py" "/path/to/where/libretro_core_options.h & libretro_core_options_intl.h/are"
The original files will be preserved as *.v1
"""
import core_option_regex as cor
import os
import sys
def create_v2_code_file(struct_text, file_name):
def replace_option(option_match):
_offset = option_match.start(0)
if option_match.group(3):
res = option_match.group(0)[:option_match.end(2) - _offset] + ',\n NULL' + \
option_match.group(0)[option_match.end(2) - _offset:option_match.end(3) - _offset] + \
'NULL,\n NULL,\n ' + option_match.group(0)[option_match.end(3) - _offset:]
else:
return option_match.group(0)
return res
comment_v1 = '/*\n' \
' ********************************\n' \
' * VERSION: 1.3\n' \
' ********************************\n' \
' *\n' \
' * - 1.3: Move translations to libretro_core_options_intl.h\n' \
' * - libretro_core_options_intl.h includes BOM and utf-8\n' \
' * fix for MSVC 2010-2013\n' \
' * - Added HAVE_NO_LANGEXTRA flag to disable translations\n' \
' * on platforms/compilers without BOM support\n' \
' * - 1.2: Use core options v1 interface when\n' \
' * RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION is >= 1\n' \
' * (previously required RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION == 1)\n' \
' * - 1.1: Support generation of core options v0 retro_core_option_value\n' \
' * arrays containing options with a single value\n' \
' * - 1.0: First commit\n' \
'*/\n'
comment_v2 = '/*\n' \
' ********************************\n' \
' * VERSION: 2.0\n' \
' ********************************\n' \
' *\n' \
' * - 2.0: Add support for core options v2 interface\n' \
' * - 1.3: Move translations to libretro_core_options_intl.h\n' \
' * - libretro_core_options_intl.h includes BOM and utf-8\n' \
' * fix for MSVC 2010-2013\n' \
' * - Added HAVE_NO_LANGEXTRA flag to disable translations\n' \
' * on platforms/compilers without BOM support\n' \
' * - 1.2: Use core options v1 interface when\n' \
' * RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION is >= 1\n' \
' * (previously required RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION == 1)\n' \
' * - 1.1: Support generation of core options v0 retro_core_option_value\n' \
' * arrays containing options with a single value\n' \
' * - 1.0: First commit\n' \
'*/\n'
p_intl = cor.p_intl
p_set = cor.p_set
new_set = 'static INLINE void libretro_set_core_options(retro_environment_t environ_cb,\n' \
' bool *categories_supported)\n' \
'{\n' \
' unsigned version = 0;\n' \
'#ifndef HAVE_NO_LANGEXTRA\n' \
' unsigned language = 0;\n' \
'#endif\n' \
'\n' \
' if (!environ_cb || !categories_supported)\n' \
' return;\n' \
'\n' \
' *categories_supported = false;\n' \
'\n' \
' if (!environ_cb(RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION, &version))\n' \
' version = 0;\n' \
'\n' \
' if (version >= 2)\n' \
' {\n' \
'#ifndef HAVE_NO_LANGEXTRA\n' \
' struct retro_core_options_v2_intl core_options_intl;\n' \
'\n' \
' core_options_intl.us = &options_us;\n' \
' core_options_intl.local = NULL;\n' \
'\n' \
' if (environ_cb(RETRO_ENVIRONMENT_GET_LANGUAGE, &language) &&\n' \
' (language < RETRO_LANGUAGE_LAST) && (language != RETRO_LANGUAGE_ENGLISH))\n' \
' core_options_intl.local = options_intl[language];\n' \
'\n' \
' *categories_supported = environ_cb(RETRO_ENVIRONMENT_SET_CORE_OPTIONS_V2_INTL,\n' \
' &core_options_intl);\n' \
'#else\n' \
' *categories_supported = environ_cb(RETRO_ENVIRONMENT_SET_CORE_OPTIONS_V2,\n' \
' &options_us);\n' \
'#endif\n' \
' }\n' \
' else\n' \
' {\n' \
' size_t i, j;\n' \
' size_t option_index = 0;\n' \
' size_t num_options = 0;\n' \
' struct retro_core_option_definition\n' \
' *option_v1_defs_us = NULL;\n' \
'#ifndef HAVE_NO_LANGEXTRA\n' \
' size_t num_options_intl = 0;\n' \
' struct retro_core_option_v2_definition\n' \
' *option_defs_intl = NULL;\n' \
' struct retro_core_option_definition\n' \
' *option_v1_defs_intl = NULL;\n' \
' struct retro_core_options_intl\n' \
' core_options_v1_intl;\n' \
'#endif\n' \
' struct retro_variable *variables = NULL;\n' \
' char **values_buf = NULL;\n' \
'\n' \
' /* Determine total number of options */\n' \
' while (true)\n' \
' {\n' \
' if (option_defs_us[num_options].key)\n' \
' num_options++;\n' \
' else\n' \
' break;\n' \
' }\n' \
'\n' \
' if (version >= 1)\n' \
' {\n' \
' /* Allocate US array */\n' \
' option_v1_defs_us = (struct retro_core_option_definition *)\n' \
' calloc(num_options + 1, sizeof(struct retro_core_option_definition));\n' \
'\n' \
' /* Copy parameters from option_defs_us array */\n' \
' for (i = 0; i < num_options; i++)\n' \
' {\n' \
' struct retro_core_option_v2_definition *option_def_us = &option_defs_us[i];\n' \
' struct retro_core_option_value *option_values = option_def_us->values;\n' \
' struct retro_core_option_definition *option_v1_def_us = &option_v1_defs_us[i];\n' \
' struct retro_core_option_value *option_v1_values = option_v1_def_us->values;\n' \
'\n' \
' option_v1_def_us->key = option_def_us->key;\n' \
' option_v1_def_us->desc = option_def_us->desc;\n' \
' option_v1_def_us->info = option_def_us->info;\n' \
' option_v1_def_us->default_value = option_def_us->default_value;\n' \
'\n' \
' /* Values must be copied individually... */\n' \
' while (option_values->value)\n' \
' {\n' \
' option_v1_values->value = option_values->value;\n' \
' option_v1_values->label = option_values->label;\n' \
'\n' \
' option_values++;\n' \
' option_v1_values++;\n' \
' }\n' \
' }\n' \
'\n' \
'#ifndef HAVE_NO_LANGEXTRA\n' \
' if (environ_cb(RETRO_ENVIRONMENT_GET_LANGUAGE, &language) &&\n' \
' (language < RETRO_LANGUAGE_LAST) && (language != RETRO_LANGUAGE_ENGLISH) &&\n' \
' options_intl[language])\n' \
' option_defs_intl = options_intl[language]->definitions;\n' \
'\n' \
' if (option_defs_intl)\n' \
' {\n' \
' /* Determine number of intl options */\n' \
' while (true)\n' \
' {\n' \
' if (option_defs_intl[num_options_intl].key)\n' \
' num_options_intl++;\n' \
' else\n' \
' break;\n' \
' }\n' \
'\n' \
' /* Allocate intl array */\n' \
' option_v1_defs_intl = (struct retro_core_option_definition *)\n' \
' calloc(num_options_intl + 1, sizeof(struct retro_core_option_definition));\n' \
'\n' \
' /* Copy parameters from option_defs_intl array */\n' \
' for (i = 0; i < num_options_intl; i++)\n' \
' {\n' \
' struct retro_core_option_v2_definition *option_def_intl = &option_defs_intl[i];\n' \
' struct retro_core_option_value *option_values = option_def_intl->values;\n' \
' struct retro_core_option_definition *option_v1_def_intl = &option_v1_defs_intl[i];\n' \
' struct retro_core_option_value *option_v1_values = option_v1_def_intl->values;\n' \
'\n' \
' option_v1_def_intl->key = option_def_intl->key;\n' \
' option_v1_def_intl->desc = option_def_intl->desc;\n' \
' option_v1_def_intl->info = option_def_intl->info;\n' \
' option_v1_def_intl->default_value = option_def_intl->default_value;\n' \
'\n' \
' /* Values must be copied individually... */\n' \
' while (option_values->value)\n' \
' {\n' \
' option_v1_values->value = option_values->value;\n' \
' option_v1_values->label = option_values->label;\n' \
'\n' \
' option_values++;\n' \
' option_v1_values++;\n' \
' }\n' \
' }\n' \
' }\n' \
'\n' \
' core_options_v1_intl.us = option_v1_defs_us;\n' \
' core_options_v1_intl.local = option_v1_defs_intl;\n' \
'\n' \
' environ_cb(RETRO_ENVIRONMENT_SET_CORE_OPTIONS_INTL, &core_options_v1_intl);\n' \
'#else\n' \
' environ_cb(RETRO_ENVIRONMENT_SET_CORE_OPTIONS, option_v1_defs_us);\n' \
'#endif\n' \
' }\n' \
' else\n' \
' {\n' \
' /* Allocate arrays */\n' \
' variables = (struct retro_variable *)calloc(num_options + 1,\n' \
' sizeof(struct retro_variable));\n' \
' values_buf = (char **)calloc(num_options, sizeof(char *));\n' \
'\n' \
' if (!variables || !values_buf)\n' \
' goto error;\n' \
'\n' \
' /* Copy parameters from option_defs_us array */\n' \
' for (i = 0; i < num_options; i++)\n' \
' {\n' \
' const char *key = option_defs_us[i].key;\n' \
' const char *desc = option_defs_us[i].desc;\n' \
' const char *default_value = option_defs_us[i].default_value;\n' \
' struct retro_core_option_value *values = option_defs_us[i].values;\n' \
' size_t buf_len = 3;\n' \
' size_t default_index = 0;\n' \
'\n' \
' values_buf[i] = NULL;\n' \
'\n' \
' if (desc)\n' \
' {\n' \
' size_t num_values = 0;\n' \
'\n' \
' /* Determine number of values */\n' \
' while (true)\n' \
' {\n' \
' if (values[num_values].value)\n' \
' {\n' \
' /* Check if this is the default value */\n' \
' if (default_value)\n' \
' if (strcmp(values[num_values].value, default_value) == 0)\n' \
' default_index = num_values;\n' \
'\n' \
' buf_len += strlen(values[num_values].value);\n' \
' num_values++;\n' \
' }\n' \
' else\n' \
' break;\n' \
' }\n' \
'\n' \
' /* Build values string */\n' \
' if (num_values > 0)\n' \
' {\n' \
' buf_len += num_values - 1;\n' \
' buf_len += strlen(desc);\n' \
'\n' \
' values_buf[i] = (char *)calloc(buf_len, sizeof(char));\n' \
' if (!values_buf[i])\n' \
' goto error;\n' \
'\n' \
' strcpy(values_buf[i], desc);\n' \
' strcat(values_buf[i], "; ");\n' \
'\n' \
' /* Default value goes first */\n' \
' strcat(values_buf[i], values[default_index].value);\n' \
'\n' \
' /* Add remaining values */\n' \
' for (j = 0; j < num_values; j++)\n' \
' {\n' \
' if (j != default_index)\n' \
' {\n' \
' strcat(values_buf[i], "|");\n' \
' strcat(values_buf[i], values[j].value);\n' \
' }\n' \
' }\n' \
' }\n' \
' }\n' \
'\n' \
' variables[option_index].key = key;\n' \
' variables[option_index].value = values_buf[i];\n' \
' option_index++;\n' \
' }\n' \
'\n' \
' /* Set variables */\n' \
' environ_cb(RETRO_ENVIRONMENT_SET_VARIABLES, variables);\n' \
' }\n' \
'\n' \
'error:\n' \
' /* Clean up */\n' \
'\n' \
' if (option_v1_defs_us)\n' \
' {\n' \
' free(option_v1_defs_us);\n' \
' option_v1_defs_us = NULL;\n' \
' }\n' \
'\n' \
'#ifndef HAVE_NO_LANGEXTRA\n' \
' if (option_v1_defs_intl)\n' \
' {\n' \
' free(option_v1_defs_intl);\n' \
' option_v1_defs_intl = NULL;\n' \
' }\n' \
'#endif\n' \
'\n' \
' if (values_buf)\n' \
' {\n' \
' for (i = 0; i < num_options; i++)\n' \
' {\n' \
' if (values_buf[i])\n' \
' {\n' \
' free(values_buf[i]);\n' \
' values_buf[i] = NULL;\n' \
' }\n' \
' }\n' \
'\n' \
' free(values_buf);\n' \
' values_buf = NULL;\n' \
' }\n' \
'\n' \
' if (variables)\n' \
' {\n' \
' free(variables);\n' \
' variables = NULL;\n' \
' }\n' \
' }\n' \
'}\n' \
'\n' \
'#ifdef __cplusplus\n' \
'}\n' \
'#endif'
struct_groups = cor.p_struct.finditer(struct_text)
out_text = struct_text
for construct in struct_groups:
repl_text = ''
declaration = construct.group(1)
struct_match = cor.p_type_name.search(declaration)
if struct_match:
if struct_match.group(3):
struct_type_name_lang = struct_match.group(1, 2, 3)
declaration_end = declaration[struct_match.end(1):]
elif struct_match.group(4):
struct_type_name_lang = struct_match.group(1, 2, 4)
declaration_end = declaration[struct_match.end(1):]
else:
struct_type_name_lang = sum((struct_match.group(1, 2), ('_us',)), ())
declaration_end = f'{declaration[struct_match.end(1):struct_match.end(2)]}_us' \
f'{declaration[struct_match.end(2):]}'
else:
return -1
if 'retro_core_option_definition' == struct_type_name_lang[0]:
import shutil
shutil.copy(file_name, file_name + '.v1')
new_declaration = f'\nstruct retro_core_option_v2_category option_cats{struct_type_name_lang[2]}[] = ' \
'{\n { NULL, NULL, NULL },\n' \
'};\n\n' \
+ declaration[:struct_match.start(1)] + \
'retro_core_option_v2_definition' \
+ declaration_end
offset = construct.start(0)
repl_text = repl_text + cor.re.sub(cor.re.escape(declaration), new_declaration,
construct.group(0)[:construct.start(2) - offset])
content = construct.group(2)
new_content = cor.p_option.sub(replace_option, content)
repl_text = repl_text + new_content + cor.re.sub(r'{\s*NULL,\s*NULL,\s*NULL,\s*{\{0}},\s*NULL\s*},\s*};',
'{ NULL, NULL, NULL, NULL, NULL, NULL, {{0}}, NULL },\n};'
'\n\nstruct retro_core_options_v2 options' +
struct_type_name_lang[2] + ' = {\n'
f' option_cats{struct_type_name_lang[2]},\n'
f' option_defs{struct_type_name_lang[2]}\n'
'};',
construct.group(0)[construct.end(2) - offset:])
out_text = cor.re.sub(cor.re.escape(construct.group(0)), repl_text, out_text)
else:
return -2
with open(file_name, 'w', encoding='utf-8') as code_file:
out_text = cor.re.sub(cor.re.escape(comment_v1), comment_v2, out_text)
intl = p_intl.search(out_text)
if intl:
new_intl = out_text[:intl.start(1)] \
+ 'struct retro_core_options_v2 *options_intl[RETRO_LANGUAGE_LAST]' \
+ out_text[intl.end(1):intl.start(2)] \
+ '&options_us, /* RETRO_LANGUAGE_ENGLISH */' \
' &options_ja, /* RETRO_LANGUAGE_JAPANESE */' \
' &options_fr, /* RETRO_LANGUAGE_FRENCH */' \
' &options_es, /* RETRO_LANGUAGE_SPANISH */' \
' &options_de, /* RETRO_LANGUAGE_GERMAN */' \
' &options_it, /* RETRO_LANGUAGE_ITALIAN */' \
' &options_nl, /* RETRO_LANGUAGE_DUTCH */' \
' &options_pt_br, /* RETRO_LANGUAGE_PORTUGUESE_BRAZIL */' \
' &options_pt_pt, /* RETRO_LANGUAGE_PORTUGUESE_PORTUGAL */' \
' &options_ru, /* RETRO_LANGUAGE_RUSSIAN */' \
' &options_ko, /* RETRO_LANGUAGE_KOREAN */' \
' &options_cht, /* RETRO_LANGUAGE_CHINESE_TRADITIONAL */' \
' &options_chs, /* RETRO_LANGUAGE_CHINESE_SIMPLIFIED */' \
' &options_eo, /* RETRO_LANGUAGE_ESPERANTO */' \
' &options_pl, /* RETRO_LANGUAGE_POLISH */' \
' &options_vn, /* RETRO_LANGUAGE_VIETNAMESE */' \
' &options_ar, /* RETRO_LANGUAGE_ARABIC */' \
' &options_el, /* RETRO_LANGUAGE_GREEK */' \
' &options_tr, /* RETRO_LANGUAGE_TURKISH */' \
' &options_sk, /* RETRO_LANGUAGE_SLOVAK */' \
' &options_fa, /* RETRO_LANGUAGE_PERSIAN */' \
' &options_he, /* RETRO_LANGUAGE_HEBREW */' \
' &options_ast, /* RETRO_LANGUAGE_ASTURIAN */' \
' &options_fi, /* RETRO_LANGUAGE_FINNISH */' \
+ out_text[intl.end(2):]
out_text = p_set.sub(new_set, new_intl)
else:
out_text = p_set.sub(new_set, out_text)
code_file.write(out_text)
return 1
# -------------------- MAIN -------------------- #
if __name__ == '__main__':
try:
if os.path.isfile(sys.argv[1]):
_temp = os.path.dirname(sys.argv[1])
else:
_temp = sys.argv[1]
while _temp.endswith('/') or _temp.endswith('\\'):
_temp = _temp[:-1]
DIR_PATH = _temp
except IndexError:
DIR_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
print("No path provided, assuming parent directory:\n" + DIR_PATH)
H_FILE_PATH = os.path.join(DIR_PATH, 'libretro_core_options.h')
INTL_FILE_PATH = os.path.join(DIR_PATH, 'libretro_core_options_intl.h')
for file in (H_FILE_PATH, INTL_FILE_PATH):
if os.path.isfile(file):
with open(file, 'r+', encoding='utf-8') as h_file:
text = h_file.read()
try:
test = create_v2_code_file(text, file)
except Exception as e:
print(e)
test = -1
if -1 > test:
print('Your file looks like it already is v2? (' + file + ')')
continue
if 0 > test:
print('An error occurred! Please make sure to use the complete v1 struct! (' + file + ')')
continue
else:
print(file + ' not found.')

streams/network_stream.c (Normal file, 252 lines)
View file

@@ -0,0 +1,252 @@
/* Copyright (C) 2022 The RetroArch team
*
* ---------------------------------------------------------------------------------------
* The following license statement only applies to this file (network_stream.c).
* ---------------------------------------------------------------------------------------
*
* Permission is hereby granted, free of charge,
* to any person obtaining a copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <stdlib.h>
#include <string.h>
#include <retro_endianness.h>
#include <streams/network_stream.h>
bool netstream_open(netstream_t *stream, void *buf, size_t size, size_t used)
{
if (buf)
{
/* Pre-allocated buffer must have a non-zero size. */
if (!size || used > size)
return false;
}
else
{
if (size)
{
buf = malloc(size);
if (!buf)
return false;
}
used = 0;
}
stream->buf = buf;
stream->size = size;
stream->used = used;
stream->pos = 0;
return true;
}
void netstream_close(netstream_t *stream, bool dealloc)
{
if (dealloc)
free(stream->buf);
memset(stream, 0, sizeof(*stream));
}
void netstream_reset(netstream_t *stream)
{
stream->pos = 0;
stream->used = 0;
}
bool netstream_truncate(netstream_t *stream, size_t used)
{
if (used > stream->size)
return false;
stream->used = used;
/* If the current stream position is past our new end of stream,
set the current position to the end of the stream. */
if (stream->pos > used)
stream->pos = used;
return true;
}
void netstream_data(netstream_t *stream, void **data, size_t *len)
{
*data = stream->buf;
*len = stream->used;
}
size_t netstream_tell(netstream_t *stream)
{
return stream->pos;
}
bool netstream_seek(netstream_t *stream, long offset, int origin)
{
long pos = (long)stream->pos;
long used = (long)stream->used;
switch (origin)
{
case NETSTREAM_SEEK_SET:
pos = offset;
break;
case NETSTREAM_SEEK_CUR:
pos += offset;
break;
case NETSTREAM_SEEK_END:
pos = used + offset;
break;
default:
return false;
}
if (pos < 0 || pos > used)
return false;
stream->pos = (size_t)pos;
return true;
}
bool netstream_read(netstream_t *stream, void *data, size_t len)
{
size_t remaining = stream->used - stream->pos;
if (!data || !remaining || len > remaining)
return false;
/* If len is 0, read all remaining bytes. */
if (!len)
len = remaining;
memcpy(data, (uint8_t*)stream->buf + stream->pos, len);
stream->pos += len;
return true;
}
/* This one doesn't require any swapping. */
bool netstream_read_byte(netstream_t *stream, uint8_t *data)
{
return netstream_read(stream, data, sizeof(*data));
}
#define NETSTREAM_READ_TYPE(name, type, swap) \
bool netstream_read_##name(netstream_t *stream, type *data) \
{ \
if (!netstream_read(stream, data, sizeof(*data))) \
return false; \
*data = swap(*data); \
return true; \
}
NETSTREAM_READ_TYPE(word, uint16_t, retro_be_to_cpu16)
NETSTREAM_READ_TYPE(dword, uint32_t, retro_be_to_cpu32)
NETSTREAM_READ_TYPE(qword, uint64_t, retro_be_to_cpu64)
#undef NETSTREAM_READ_TYPE
#ifdef __STDC_IEC_559__
#define NETSTREAM_READ_TYPE(name, type, type_alt, swap) \
bool netstream_read_##name(netstream_t *stream, type *data) \
{ \
type_alt *data_alt = (type_alt*)data; \
if (!netstream_read(stream, data, sizeof(*data))) \
return false; \
*data_alt = swap(*data_alt); \
return true; \
}
NETSTREAM_READ_TYPE(float, float, uint32_t, retro_be_to_cpu32)
NETSTREAM_READ_TYPE(double, double, uint64_t, retro_be_to_cpu64)
#undef NETSTREAM_READ_TYPE
#endif
bool netstream_write(netstream_t *stream, const void *data, size_t len)
{
size_t remaining = stream->size - stream->pos;
if (!data || !len)
return false;
if (len > remaining)
{
if (!stream->size)
{
stream->buf = malloc(len);
if (!stream->buf)
return false;
stream->size = len;
}
else
{
size_t size = stream->size + (len - remaining);
void *buf = realloc(stream->buf, size);
if (!buf)
return false;
stream->buf = buf;
stream->size = size;
}
}
memcpy((uint8_t*)stream->buf + stream->pos, data, len);
stream->pos += len;
if (stream->pos > stream->used)
stream->used = stream->pos;
return true;
}
/* This one doesn't require any swapping. */
bool netstream_write_byte(netstream_t *stream, uint8_t data)
{
return netstream_write(stream, &data, sizeof(data));
}
#define NETSTREAM_WRITE_TYPE(name, type, swap) \
bool netstream_write_##name(netstream_t *stream, type data) \
{ \
data = swap(data); \
return netstream_write(stream, &data, sizeof(data)); \
}
NETSTREAM_WRITE_TYPE(word, uint16_t, retro_cpu_to_be16)
NETSTREAM_WRITE_TYPE(dword, uint32_t, retro_cpu_to_be32)
NETSTREAM_WRITE_TYPE(qword, uint64_t, retro_cpu_to_be64)
#undef NETSTREAM_WRITE_TYPE
#ifdef __STDC_IEC_559__
#define NETSTREAM_WRITE_TYPE(name, type, type_alt, swap) \
bool netstream_write_##name(netstream_t *stream, type data) \
{ \
type_alt *data_alt = (type_alt*)&data; \
*data_alt = swap(*data_alt); \
return netstream_write(stream, &data, sizeof(data)); \
}
NETSTREAM_WRITE_TYPE(float, float, uint32_t, retro_cpu_to_be32)
NETSTREAM_WRITE_TYPE(double, double, uint64_t, retro_cpu_to_be64)
#undef NETSTREAM_WRITE_TYPE
#endif