Implemented CACHE IXIN/IHIN and LLD/SCD instructions.

Optimized LDL/LDR/SDL/SDR instructions.
Optimized ULW/USW/ULD/USD instruction macros.
Optimized AND/OR/XOR/NOR/ORI/XORI with 32-bit operands.
Updated Mupen64Plus RSP HLE plugin.
Updated xxHash to v0.6.5.
Other minor fixes.
Extrems 2022-10-29 19:17:02 -04:00
parent 75c0d1ae96
commit c73cf029eb
16 changed files with 1233 additions and 471 deletions


@ -14,11 +14,6 @@
#else
# include "../main/winlnxdefs.h"
#endif // __LINUX__
#define XXH_PRIVATE_API
#define XXH_FORCE_MEMORY_ACCESS 2
#define XXH_FORCE_NATIVE_FORMAT 1
#define XXH_FORCE_ALIGN_CHECK 0
#include "../main/xxhash.h"
#define CRC32_POLYNOMIAL 0x04C11DB7
@ -99,8 +94,3 @@ DWORD CRC_CalculatePalette( DWORD crc, void *buffer, DWORD count )
return crc ^ orig;
}
DWORD Hash_Calculate( DWORD hash, void *buffer, DWORD count )
{
return XXH32(buffer, count, hash);
}


@ -14,10 +14,16 @@
#else
# include "../main/winlnxdefs.h"
#endif // __LINUX__
#define XXH_INLINE_ALL
#define XXH_FORCE_NATIVE_FORMAT 1
#include "../main/xxhash.h"
void CRC_BuildTable();
DWORD CRC_Calculate( DWORD crc, void *buffer, DWORD count );
DWORD CRC_CalculatePalette( DWORD crc, void *buffer, DWORD count );
DWORD Hash_Calculate( DWORD hash, void *buffer, DWORD count );
inline DWORD Hash_Calculate( DWORD hash, void *buffer, DWORD count )
{
return XXH32(buffer, count, hash);
}
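For context, a minimal usage sketch (buffer names hypothetical): the texture cache below chains this wrapper over texel data and, when present, the palette, now seeding with 0 rather than the CRC-style 0xFFFFFFFF:

DWORD hash = 0;
hash = Hash_Calculate( hash, texels, texelBytes );    /* texture data */
hash = Hash_Calculate( hash, palette, paletteBytes ); /* optional palette pass */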


@ -594,7 +594,7 @@ void TextureCache_Init()
cache.dummy->clampT = 1;
cache.dummy->clampWidth = 2;
cache.dummy->clampHeight = 2;
cache.dummy->crc = 0;
cache.dummy->hash = 0;
cache.dummy->format = 0;
cache.dummy->size = 0;
cache.dummy->frameBufferTexture = FALSE;
@ -1500,9 +1500,9 @@ void TextureCache_Load( CachedTexture *texInfo )
#endif // !__GX__
}
u32 TextureCache_CalculateCRC( u32 t, u32 width, u32 height )
u32 TextureCache_CalculateHash( u32 t, u32 width, u32 height )
{
u32 crc;
u32 hash;
u32 y, /*i,*/ bpl, lineBytes, line;
u64 *src;
@ -1513,21 +1513,21 @@ u32 TextureCache_CalculateCRC( u32 t, u32 width, u32 height )
if (gSP.textureTile[t]->size == G_IM_SIZ_32b)
line <<= 1;
crc = 0xFFFFFFFF;
hash = 0;
for (y = 0; y < height; y++)
{
src = (u64*)&TMEM[(gSP.textureTile[t]->tmem + line * y) & 0x1FF];
crc = Hash_Calculate( crc, src, bpl );
hash = Hash_Calculate( hash, src, bpl );
}
if ((gDP.otherMode.textureLUT != G_TT_NONE) || (gSP.textureTile[t]->format == G_IM_FMT_CI))
{
if (gSP.textureTile[t]->size == G_IM_SIZ_4b)
crc = Hash_Calculate( crc, &TMEM[0x100 + (gSP.textureTile[t]->palette << 4)], 128 );
hash = Hash_Calculate( hash, &TMEM[0x100 + (gSP.textureTile[t]->palette << 4)], 128 );
else if ((gSP.textureTile[t]->size == G_IM_SIZ_8b) || (gSP.textureTile[t]->size == G_IM_SIZ_16b))
crc = Hash_Calculate( crc, &TMEM[0x100], 2048 );
hash = Hash_Calculate( hash, &TMEM[0x100], 2048 );
}
return crc;
return hash;
}
void TextureCache_ActivateTexture( u32 t, CachedTexture *texture )
@ -1617,23 +1617,23 @@ void TextureCache_ActivateDummy( u32 t )
void TextureCache_UpdateBackground()
{
u32 numBytes = gSP.bgImage.width * gSP.bgImage.height << gSP.bgImage.size >> 1;
u32 crc;
u32 hash;
crc = Hash_Calculate( 0xFFFFFFFF, &RDRAM[gSP.bgImage.address], numBytes );
hash = Hash_Calculate( 0, &RDRAM[gSP.bgImage.address], numBytes );
if (gSP.bgImage.format == G_IM_FMT_CI)
{
if (gSP.bgImage.size == G_IM_SIZ_4b)
crc = Hash_Calculate( crc, &TMEM[0x100 + (gSP.bgImage.palette << 4)], 128 );
hash = Hash_Calculate( hash, &TMEM[0x100 + (gSP.bgImage.palette << 4)], 128 );
else if (gSP.bgImage.size == G_IM_SIZ_8b)
crc = Hash_Calculate( crc, &TMEM[0x100], 2048 );
hash = Hash_Calculate( hash, &TMEM[0x100], 2048 );
}
CachedTexture *current = cache.top;
while (current)
{
if ((current->crc == crc) &&
if ((current->hash == hash) &&
(current->width == gSP.bgImage.width) &&
(current->height == gSP.bgImage.height) &&
(current->format == gSP.bgImage.format) &&
@ -1664,7 +1664,7 @@ void TextureCache_UpdateBackground()
#endif // !__GX__
cache.current[0]->address = gSP.bgImage.address;
cache.current[0]->crc = crc;
cache.current[0]->hash = hash;
cache.current[0]->format = gSP.bgImage.format;
cache.current[0]->size = gSP.bgImage.size;
@ -1706,7 +1706,7 @@ void TextureCache_Update( u32 t )
{
CachedTexture *current;
//s32 i, j, k;
u32 crc, /*bpl, cacheNum,*/ maxTexels;
u32 hash, /*bpl, cacheNum,*/ maxTexels;
u32 tileWidth, maskWidth, loadWidth, lineWidth, clampWidth, height;
u32 tileHeight, maskHeight, loadHeight, lineHeight, clampHeight, width;
@ -1848,7 +1848,7 @@ void TextureCache_Update( u32 t )
maskHeight = 1 << gSP.textureTile[t]->maskt;
}
crc = TextureCache_CalculateCRC( t, width, height );
hash = TextureCache_CalculateHash( t, width, height );
// if (!TextureCache_Verify())
// current = cache.top;
@ -1856,7 +1856,7 @@ void TextureCache_Update( u32 t )
current = cache.top;
while (current)
{
if ((current->crc == crc) &&
if ((current->hash == hash) &&
// (current->address == gDP.textureImage.address) &&
// (current->palette == gSP.textureTile[t]->palette) &&
(current->width == width) &&
@ -1902,7 +1902,7 @@ void TextureCache_Update( u32 t )
#endif // !__GX__
cache.current[t]->address = gDP.textureImage.address;
cache.current[t]->crc = crc;
cache.current[t]->hash = hash;
cache.current[t]->format = gSP.textureTile[t]->format;
cache.current[t]->size = gSP.textureTile[t]->size;


@ -37,7 +37,7 @@ struct CachedTexture
GLuint glName;
u32 address;
u32 crc;
u32 hash;
// float fulS, fulT;
// WORD ulS, ulT, lrS, lrT;
float offsetS, offsetT;


@ -50,20 +50,27 @@
* Prefer these methods in priority order (0 > 1 > 2)
*/
#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
|| defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
|| defined(__PPC__) )
# define XXH_FORCE_MEMORY_ACCESS 2
# elif defined(__INTEL_COMPILER) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
|| defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
|| defined(__ARM_ARCH_7S__) ))
# define XXH_FORCE_MEMORY_ACCESS 1
# endif
#endif
/*!XXH_ACCEPT_NULL_INPUT_POINTER :
* If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
* When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
* By default, this option is disabled. To enable it, uncomment below define :
* If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault.
* When this macro is enabled, xxHash actively checks input for null pointer.
* If it is, the result for null input pointers is the same as for a null-length input.
*/
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */
# define XXH_ACCEPT_NULL_INPUT_POINTER 0
#endif
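The guarantee described above can be sketched as follows (seed value hypothetical; only holds when building with the macro set to 1):

#include <assert.h>
/* With XXH_ACCEPT_NULL_INPUT_POINTER >= 1, NULL input hashes like an empty input: */
assert(XXH32(NULL, 0, 12345) == XXH32("", 0, 12345));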
/*!XXH_FORCE_NATIVE_FORMAT :
* By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
@ -85,7 +92,8 @@
* or when alignment doesn't matter for performance.
*/
#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
# if defined(__i386) || defined(__x86_64__) || defined(__PPC__) \
|| defined(_M_IX86) || defined(_M_X64) || defined(_M_PPC) /* visual */
# define XXH_FORCE_ALIGN_CHECK 0
# else
# define XXH_FORCE_ALIGN_CHECK 1
@ -105,6 +113,8 @@ static void XXH_free (void* p) { free(p); }
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
#include <assert.h> /* assert */
#define XXH_STATIC_LINKING_ONLY
#include "xxhash.h"
@ -132,7 +142,9 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
* Basic Types
***************************************/
#ifndef MEM_MODULE
# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# if !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
@ -207,8 +219,12 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
static const int g_one = 1;
# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one))
static int XXH_isLittleEndian(void)
{
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
#endif
@ -239,12 +255,12 @@ static U32 XXH_readBE32(const void* ptr)
/* *************************************
* Macros
***************************************/
#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
/* *******************************************************************
* 32-bits hash functions
* 32-bit hash functions
*********************************************************************/
static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_2 = 2246822519U;
@ -260,14 +276,89 @@ static U32 XXH32_round(U32 seed, U32 input)
return seed;
}
FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
/* mix all bits */
static U32 XXH32_avalanche(U32 h32)
{
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return(h32);
}
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
static U32
XXH32_finalize(U32 h32, const void* ptr, size_t len,
XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)ptr;
#define PROCESS1 \
h32 += (*p) * PRIME32_5; \
p++; \
h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
#define PROCESS4 \
h32 += XXH_get32bits(p) * PRIME32_3; \
p+=4; \
h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
switch(len&15) /* or switch(bEnd - p) */
{
case 12: PROCESS4;
/* fallthrough */
case 8: PROCESS4;
/* fallthrough */
case 4: PROCESS4;
return XXH32_avalanche(h32);
case 13: PROCESS4;
/* fallthrough */
case 9: PROCESS4;
/* fallthrough */
case 5: PROCESS4;
PROCESS1;
return XXH32_avalanche(h32);
case 14: PROCESS4;
/* fallthrough */
case 10: PROCESS4;
/* fallthrough */
case 6: PROCESS4;
PROCESS1;
PROCESS1;
return XXH32_avalanche(h32);
case 15: PROCESS4;
/* fallthrough */
case 11: PROCESS4;
/* fallthrough */
case 7: PROCESS4;
/* fallthrough */
case 3: PROCESS1;
/* fallthrough */
case 2: PROCESS1;
/* fallthrough */
case 1: PROCESS1;
/* fallthrough */
case 0: return XXH32_avalanche(h32);
}
assert(0);
return h32; /* reaching this point is deemed impossible */
}
FORCE_INLINE U32
XXH32_endian_align(const void* input, size_t len, U32 seed,
XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* bEnd = p + len;
U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
if (p==NULL) {
len=0;
bEnd=p=(const BYTE*)(size_t)16;
@ -275,7 +366,7 @@ FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH
#endif
if (len>=16) {
const BYTE* const limit = bEnd - 16;
const BYTE* const limit = bEnd - 15;
U32 v1 = seed + PRIME32_1 + PRIME32_2;
U32 v2 = seed + PRIME32_2;
U32 v3 = seed + 0;
@ -286,34 +377,17 @@ FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH
v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
} while (p<=limit);
} while (p < limit);
h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
+ XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
} else {
h32 = seed + PRIME32_5;
}
h32 += (U32) len;
h32 += (U32)len;
while (p+4<=bEnd) {
h32 += XXH_get32bits(p) * PRIME32_3;
h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
p+=4;
}
while (p<bEnd) {
h32 += (*p) * PRIME32_5;
h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
p++;
}
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return h32;
return XXH32_finalize(h32, p, len&15, endian, align);
}
@ -365,23 +439,25 @@ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */
memset(&state, 0, sizeof(state));
state.v1 = seed + PRIME32_1 + PRIME32_2;
state.v2 = seed + PRIME32_2;
state.v3 = seed + 0;
state.v4 = seed - PRIME32_1;
memcpy(statePtr, &state, sizeof(state));
/* do not write into reserved, planned to be removed in a future version */
memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
return XXH_OK;
}
FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
FORCE_INLINE
XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
if (input==NULL)
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
return XXH_OK;
#else
return XXH_ERROR;
@ -436,6 +512,7 @@ FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void
return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
@ -447,40 +524,23 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void*
}
FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
FORCE_INLINE U32
XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
const BYTE * p = (const BYTE*)state->mem32;
const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
U32 h32;
if (state->large_len) {
h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
h32 = XXH_rotl32(state->v1, 1)
+ XXH_rotl32(state->v2, 7)
+ XXH_rotl32(state->v3, 12)
+ XXH_rotl32(state->v4, 18);
} else {
h32 = state->v3 /* == seed */ + PRIME32_5;
}
h32 += state->total_len_32;
while (p+4<=bEnd) {
h32 += XXH_readLE32(p, endian) * PRIME32_3;
h32 = XXH_rotl32(h32, 17) * PRIME32_4;
p+=4;
}
while (p<bEnd) {
h32 += (*p) * PRIME32_5;
h32 = XXH_rotl32(h32, 11) * PRIME32_1;
p++;
}
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return h32;
return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned);
}
@ -500,7 +560,7 @@ XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
/*! Default XXH result types are basic unsigned 32 and 64 bits.
* The canonical representation follows human-readable write convention, aka big-endian (large digits first).
* These functions allow transformation of hash result into and from its canonical format.
* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
* This way, hash values can be written into a file or buffer, remaining comparable across different systems.
*/
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
@ -519,18 +579,21 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src
#ifndef XXH_NO_LONG_LONG
/* *******************************************************************
* 64-bits hash functions
* 64-bit hash functions
*********************************************************************/
/*====== Memory access ======*/
#ifndef MEM_MODULE
# define MEM_MODULE
# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# if !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef uint64_t U64;
# else
typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
/* if compiler doesn't support unsigned long long, replace by another 64-bit type */
typedef unsigned long long U64;
# endif
#endif
@ -623,14 +686,138 @@ static U64 XXH64_mergeRound(U64 acc, U64 val)
return acc;
}
FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
static U64 XXH64_avalanche(U64 h64)
{
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
h64 ^= h64 >> 29;
h64 *= PRIME64_3;
h64 ^= h64 >> 32;
return h64;
}
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
static U64
XXH64_finalize(U64 h64, const void* ptr, size_t len,
XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)ptr;
#define PROCESS1_64 \
h64 ^= (*p) * PRIME64_5; \
p++; \
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
#define PROCESS4_64 \
h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \
p+=4; \
h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
#define PROCESS8_64 { \
U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \
p+=8; \
h64 ^= k1; \
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
}
switch(len&31) {
case 24: PROCESS8_64;
/* fallthrough */
case 16: PROCESS8_64;
/* fallthrough */
case 8: PROCESS8_64;
return XXH64_avalanche(h64);
case 28: PROCESS8_64;
/* fallthrough */
case 20: PROCESS8_64;
/* fallthrough */
case 12: PROCESS8_64;
/* fallthrough */
case 4: PROCESS4_64;
return XXH64_avalanche(h64);
case 25: PROCESS8_64;
/* fallthrough */
case 17: PROCESS8_64;
/* fallthrough */
case 9: PROCESS8_64;
PROCESS1_64;
return XXH64_avalanche(h64);
case 29: PROCESS8_64;
/* fallthrough */
case 21: PROCESS8_64;
/* fallthrough */
case 13: PROCESS8_64;
/* fallthrough */
case 5: PROCESS4_64;
PROCESS1_64;
return XXH64_avalanche(h64);
case 26: PROCESS8_64;
/* fallthrough */
case 18: PROCESS8_64;
/* fallthrough */
case 10: PROCESS8_64;
PROCESS1_64;
PROCESS1_64;
return XXH64_avalanche(h64);
case 30: PROCESS8_64;
/* fallthrough */
case 22: PROCESS8_64;
/* fallthrough */
case 14: PROCESS8_64;
/* fallthrough */
case 6: PROCESS4_64;
PROCESS1_64;
PROCESS1_64;
return XXH64_avalanche(h64);
case 27: PROCESS8_64;
/* fallthrough */
case 19: PROCESS8_64;
/* fallthrough */
case 11: PROCESS8_64;
PROCESS1_64;
PROCESS1_64;
PROCESS1_64;
return XXH64_avalanche(h64);
case 31: PROCESS8_64;
/* fallthrough */
case 23: PROCESS8_64;
/* fallthrough */
case 15: PROCESS8_64;
/* fallthrough */
case 7: PROCESS4_64;
/* fallthrough */
case 3: PROCESS1_64;
/* fallthrough */
case 2: PROCESS1_64;
/* fallthrough */
case 1: PROCESS1_64;
/* fallthrough */
case 0: return XXH64_avalanche(h64);
}
/* impossible to reach */
assert(0);
return 0; /* unreachable, but some compilers complain without it */
}
FORCE_INLINE U64
XXH64_endian_align(const void* input, size_t len, U64 seed,
XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* bEnd = p + len;
U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
if (p==NULL) {
len=0;
bEnd=p=(const BYTE*)(size_t)32;
@ -663,32 +850,7 @@ FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH
h64 += (U64) len;
while (p+8<=bEnd) {
U64 const k1 = XXH64_round(0, XXH_get64bits(p));
h64 ^= k1;
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
p+=8;
}
if (p+4<=bEnd) {
h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
p+=4;
}
while (p<bEnd) {
h64 ^= (*p) * PRIME64_5;
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
p++;
}
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
h64 ^= h64 >> 29;
h64 *= PRIME64_3;
h64 ^= h64 >> 32;
return h64;
return XXH64_finalize(h64, p, len, endian, align);
}
@ -738,22 +900,24 @@ XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */
memset(&state, 0, sizeof(state));
state.v1 = seed + PRIME64_1 + PRIME64_2;
state.v2 = seed + PRIME64_2;
state.v3 = seed + 0;
state.v4 = seed - PRIME64_1;
memcpy(statePtr, &state, sizeof(state));
/* do not write into reserved, planned to be removed in a future version */
memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
return XXH_OK;
}
FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
FORCE_INLINE
XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
if (input==NULL)
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
return XXH_OK;
#else
return XXH_ERROR;
@ -817,8 +981,6 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void*
FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
const BYTE * p = (const BYTE*)state->mem64;
const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
U64 h64;
if (state->total_len >= 32) {
@ -833,37 +995,12 @@ FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess
h64 = XXH64_mergeRound(h64, v3);
h64 = XXH64_mergeRound(h64, v4);
} else {
h64 = state->v3 + PRIME64_5;
h64 = state->v3 /*seed*/ + PRIME64_5;
}
h64 += (U64) state->total_len;
while (p+8<=bEnd) {
U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
h64 ^= k1;
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
p+=8;
}
if (p+4<=bEnd) {
h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
p+=4;
}
while (p<bEnd) {
h64 ^= (*p) * PRIME64_5;
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
p++;
}
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
h64 ^= h64 >> 29;
h64 *= PRIME64_3;
h64 ^= h64 >> 32;
return h64;
return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned);
}
XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)


@ -57,8 +57,8 @@ Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.
A 64-bits version, named XXH64, is available since r35.
It offers much better speed, but for 64-bits applications only.
A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64    13.8 GB/s           1.9 GB/s
XXH32    6.8 GB/s            6.0 GB/s
@ -80,18 +80,19 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
/* ****************************
* API modifier
******************************/
/** XXH_PRIVATE_API
* This is useful to include xxhash functions in `static` mode
* in order to inline them, and remove their symbol from the public list.
* Methodology :
* #define XXH_PRIVATE_API
* #include "xxhash.h"
* `xxhash.c` is automatically included.
* It's not useful to compile and link it as a separate module.
*/
#ifdef XXH_PRIVATE_API
* API modifier
******************************/
/** XXH_INLINE_ALL (and XXH_PRIVATE_API)
* This is useful to include xxhash functions in `static` mode
* in order to inline them, and remove their symbol from the public list.
* Inlining can offer dramatic performance improvement on small keys.
* Methodology :
* #define XXH_INLINE_ALL
* #include "xxhash.h"
* `xxhash.c` is automatically included.
* It's not useful to compile and link it as a separate module.
*/
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
# ifndef XXH_STATIC_LINKING_ONLY
# define XXH_STATIC_LINKING_ONLY
# endif
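The methodology the comment prescribes, and what this commit's CRC.h now does (see above):

/* Define before the single include; do not compile or link xxhash.c separately. */
#define XXH_INLINE_ALL
#include "xxhash.h"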
@ -102,23 +103,24 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
# elif defined(_MSC_VER)
# define XXH_PUBLIC_API static __inline
# else
# define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */
/* this version may generate warnings for unused static functions */
# define XXH_PUBLIC_API static
# endif
#else
# define XXH_PUBLIC_API /* do nothing */
#endif /* XXH_PRIVATE_API */
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
/*!XXH_NAMESPACE, aka Namespace Emulation :
If you want to include _and expose_ xxHash functions from within your own library,
but also want to avoid symbol collisions with other libraries which may also include xxHash,
you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library
with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values).
Note that no change is required within the calling program as long as it includes `xxhash.h` :
regular symbol name will be automatically translated by this header.
*/
/*! XXH_NAMESPACE, aka Namespace Emulation :
*
* If you want to include _and expose_ xxHash functions from within your own library,
* but also want to avoid symbol collisions with other libraries which may also include xxHash,
*
* you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library
* with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values).
*
* Note that no change is required within the calling program as long as it includes `xxhash.h`:
* regular symbol names will be automatically translated by this header.
*/
#ifdef XXH_NAMESPACE
# define XXH_CAT(A,B) A##B
# define XXH_NAME2(A,B) XXH_CAT(A,B)
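For illustration (the mylib_ prefix is hypothetical), a build that namespaces its embedded copy:

/* With XXH_NAMESPACE defined, XXH32 is renamed to mylib_XXH32 at link level,
 * so two libraries embedding xxHash can coexist; callers still write XXH32(). */
#define XXH_NAMESPACE mylib_
#include "xxhash.h"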
@ -149,18 +151,18 @@ regular symbol name will be automatically translated by this header.
***************************************/
#define XXH_VERSION_MAJOR 0
#define XXH_VERSION_MINOR 6
#define XXH_VERSION_RELEASE 3
#define XXH_VERSION_RELEASE 5
#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
XXH_PUBLIC_API unsigned XXH_versionNumber (void);
/*-**********************************************************************
* 32-bits hash
* 32-bit hash
************************************************************************/
typedef unsigned int XXH32_hash_t;
/*! XXH32() :
Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input".
Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input".
The memory between input & input+length must be valid (allocated and read-accessible).
"seed" can be used to alter the result predictably.
Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */
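A one-shot call matching the description above (buffer and seed hypothetical):

const char data[] = "Nintendo 64";
unsigned int const h = XXH32(data, sizeof(data)-1, 0 /* seed */);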
@ -177,26 +179,25 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void*
XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
/*
These functions generate the xxHash of an input provided in multiple segments.
Note that, for small input, they are slower than single-call functions, due to state management.
For small input, prefer `XXH32()` and `XXH64()` .
XXH state must first be allocated, using XXH*_createState() .
Start a new hash by initializing state with a seed, using XXH*_reset().
Then, feed the hash state by calling XXH*_update() as many times as necessary.
Obviously, input must be allocated and read accessible.
The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
Finally, a hash value can be produced anytime, by using XXH*_digest().
This function returns the nn-bits hash as an int or long long.
It's still possible to continue inserting input into the hash state after a digest,
and generate some new hashes later on, by calling again XXH*_digest().
When done, free XXH state space if it was allocated dynamically.
*/
* Streaming functions generate the xxHash of an input provided in multiple segments.
* Note that, for small input, they are slower than single-call functions, due to state management.
* For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
*
* XXH state must first be allocated, using XXH*_createState() .
*
* Start a new hash by initializing state with a seed, using XXH*_reset().
*
* Then, feed the hash state by calling XXH*_update() as many times as necessary.
* The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
*
* Finally, a hash value can be produced anytime, by using XXH*_digest().
* This function returns the nn-bit hash as an int or long long.
*
* It's still possible to continue inserting input into the hash state after a digest,
* and generate some new hashes later on, by calling again XXH*_digest().
*
* When done, free XXH state space if it was allocated dynamically.
*/
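A minimal streaming sketch following those steps (error checks elided; the input split is hypothetical):

XXH32_state_t* const st = XXH32_createState();
XXH32_reset(st, 0 /* seed */);
XXH32_update(st, "hello, ", 7);
XXH32_update(st, "world", 5);
XXH32_hash_t const h = XXH32_digest(st); /* equals XXH32("hello, world", 12, 0) */
XXH32_freeState(st);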
/*====== Canonical representation ======*/
@ -205,22 +206,22 @@ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.
* The canonical representation uses human-readable write convention, aka big-endian (large digits first).
* These functions allow transformation of hash result into and from its canonical format.
* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
*/
* The canonical representation uses human-readable write convention, aka big-endian (large digits first).
* These functions allow transformation of hash result into and from its canonical format.
* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
*/
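Round-tripping a value through the canonical form, per the comment (h is an arbitrary previously computed hash):

XXH32_canonical_t c;
XXH32_canonicalFromHash(&c, h);  /* c.digest[] is big-endian, safe to write to disk */
XXH32_hash_t const h2 = XXH32_hashFromCanonical(&c); /* h2 == h on any platform */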
#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
* 64-bits hash
* 64-bit hash
************************************************************************/
typedef unsigned long long XXH64_hash_t;
/*! XXH64() :
Calculate the 64-bits hash of sequence of length "len" stored at memory address "input".
Calculate the 64-bit hash of sequence of length "len" stored at memory address "input".
"seed" can be used to alter the result predictably.
This function runs faster on 64-bits systems, but slower on 32-bits systems (see benchmark).
This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
*/
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
@ -241,18 +242,49 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src
#endif /* XXH_NO_LONG_LONG */
#ifdef XXH_STATIC_LINKING_ONLY
/* ================================================================================================
This section contains definitions which are not guaranteed to remain stable.
This section contains declarations which are not guaranteed to remain stable.
They may change in future versions, becoming incompatible with a different version of the library.
They shall only be used with static linking.
Never use these definitions in association with dynamic linking !
These declarations should only be used with static linking.
Never use them in association with dynamic linking !
=================================================================================================== */
/* These definitions are only meant to make possible
static allocation of XXH state, on stack or in a struct for example.
Never use members directly. */
/* These definitions are only present to allow
* static allocation of XXH state, on stack or in a struct for example.
* Never **ever** use members directly. */
#if !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
struct XXH32_state_s {
uint32_t total_len_32;
uint32_t large_len;
uint32_t v1;
uint32_t v2;
uint32_t v3;
uint32_t v4;
uint32_t mem32[4];
uint32_t memsize;
uint32_t reserved; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH32_state_t */
struct XXH64_state_s {
uint64_t total_len;
uint64_t v1;
uint64_t v2;
uint64_t v3;
uint64_t v4;
uint64_t mem64[4];
uint32_t memsize;
uint32_t reserved[2]; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH64_state_t */
# else
struct XXH32_state_s {
unsigned total_len_32;
@ -261,25 +293,28 @@ struct XXH32_state_s {
unsigned v2;
unsigned v3;
unsigned v4;
unsigned mem32[4]; /* buffer defined as U32 for alignment */
unsigned mem32[4];
unsigned memsize;
unsigned reserved; /* never read nor write, will be removed in a future version */
unsigned reserved; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH32_state_t */
#ifndef XXH_NO_LONG_LONG /* remove 64-bits support */
# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */
struct XXH64_state_s {
unsigned long long total_len;
unsigned long long v1;
unsigned long long v2;
unsigned long long v3;
unsigned long long v4;
unsigned long long mem64[4]; /* buffer defined as U64 for alignment */
unsigned long long mem64[4];
unsigned memsize;
unsigned reserved[2]; /* never read nor write, will be removed in a future version */
unsigned reserved[2]; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH64_state_t */
#endif
# endif
#ifdef XXH_PRIVATE_API
# endif
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */
#endif


@ -115,7 +115,9 @@ unsigned int decodeNInterpret();
#define INTERPRET_TRAPS
//#define INTERPRET_LL
//#define INTERPRET_LLD
//#define INTERPRET_SC
//#define INTERPRET_SCD
//#define INTERPRET_COP0
//#define INTERPRET_MFC0

File diff suppressed because it is too large.


@ -65,6 +65,8 @@ typedef unsigned int MIPS_instr;
#define MIPS_IMMED_MASK 0xFFFF
#define MIPS_IMMED_SHIFT 0
#define MIPS_GET_IMMED(instr) ((instr >> MIPS_IMMED_SHIFT) & MIPS_IMMED_MASK)
#define MIPS_GET_SIMMED(instr) ((signed short)MIPS_GET_IMMED(instr))
#define MIPS_GET_UIMMED(instr) ((unsigned short)MIPS_GET_IMMED(instr))
#define MIPS_LI_MASK 0x3FFFFFF
#define MIPS_LI_SHIFT 0
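The distinction the two new accessors encode, on a worked example (instruction word hypothetical):

/* For an instruction whose immediate field holds 0xFFFF:
 *   MIPS_GET_SIMMED(instr) == (signed short)0xFFFF   == -1     (sign-extended)
 *   MIPS_GET_UIMMED(instr) == (unsigned short)0xFFFF == 65535  (zero-extended)
 * Loads/stores and ADDI want the signed form; ANDI/ORI/XORI want the unsigned one. */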


@ -141,8 +141,9 @@ RegMapping mapRegister64New(int gpr){
if(regMap[gpr].map.hi < 0){
// Try to find any already available register
int available = getAvailableHWReg();
if(available >= 0) regMap[gpr].map.hi = available;
else {
if(available >= 0){
regMap[gpr].map.hi = available;
} else {
// We didn't find an available register, so flush one
RegMapping lru = flushLRURegister();
if(lru.hi >= 0) availableRegs[lru.hi] = 1;
@ -160,7 +161,10 @@ RegMapping mapRegister64New(int gpr){
if(regMap[gpr].map.lo < 0){
// We didn't find any available registers, so flush one
RegMapping lru = flushLRURegister();
if(lru.hi >= 0) availableRegs[lru.hi] = 1;
if(regMap[gpr].map.hi < 0)
regMap[gpr].map.hi = lru.hi;
else if(lru.hi >= 0)
availableRegs[lru.hi] = 1;
regMap[gpr].map.lo = lru.lo;
}
if(regMap[gpr].map.hi < 0){
@ -241,7 +245,10 @@ RegMapping mapRegister64(int gpr){
if(regMap[gpr].map.lo < 0){
// We didn't find any available registers, so flush one
RegMapping lru = flushLRURegister();
if(lru.hi >= 0) availableRegs[lru.hi] = 1;
if(regMap[gpr].map.hi < 0)
regMap[gpr].map.hi = lru.hi;
else if(lru.hi >= 0)
availableRegs[lru.hi] = 1;
regMap[gpr].map.lo = lru.lo;
}
if(regMap[gpr].map.hi < 0){


@ -220,8 +220,12 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
PC->addr = interp_addr = pc;
delay_slot = isDelaySlot;
if(count < 1 || value + count > 32) abort();
switch(type){
case MEM_LW:
addr &= ~3;
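/* fall through: the aligned LW case reuses the ULW load loop below */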
case MEM_ULW:
for(i = 0; i < count; i++){
address = addr + i*4;
read_word_in_memory();
@ -230,6 +234,7 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
}
break;
case MEM_LWU:
addr &= ~3;
for(i = 0; i < count; i++){
address = addr + i*4;
read_word_in_memory();
@ -238,6 +243,7 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
}
break;
case MEM_LH:
addr &= ~1;
for(i = 0; i < count; i++){
address = addr + i*2;
read_hword_in_memory();
@ -246,6 +252,7 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
}
break;
case MEM_LHU:
addr &= ~1;
for(i = 0; i < count; i++){
address = addr + i*2;
read_hword_in_memory();
@ -270,6 +277,8 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
}
break;
case MEM_LD:
addr &= ~7;
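/* fall through: the aligned LD case reuses the ULD load loop below */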
case MEM_ULD:
for(i = 0; i < count; i++){
address = addr + i*8;
read_dword_in_memory();
@ -278,6 +287,7 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
}
break;
case MEM_LWC1:
addr &= ~3;
for(i = 0; i < count; i++){
address = addr + i*4;
read_word_in_memory();
@ -286,6 +296,7 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
}
break;
case MEM_LDC1:
addr &= ~7;
for(i = 0; i < count; i++){
address = addr + i*8;
read_dword_in_memory();
@ -294,12 +305,19 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
}
break;
case MEM_LL:
address = addr;
address = addr &= ~3;
read_word_in_memory();
if(!address) break;
reg[value] = (signed long)word;
llbit = 1;
break;
case MEM_LLD:
address = addr &= ~7;
read_dword_in_memory();
if(!address) break;
reg[value] = dword;
llbit = 1;
break;
case MEM_LWL:
address = addr & ~3;
read_word_in_memory();
@ -309,7 +327,7 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
} else {
u32 shift = (addr & 3) * 8;
u32 mask = 0xFFFFFFFF << shift;
reg[value] = (reg[value] & ~mask) | ((word << shift) & mask);
reg[value] = (signed long)((reg[value] & ~mask) | ((word << shift) & mask));
}
break;
case MEM_LWR:
@ -321,7 +339,7 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
} else {
u32 shift = (~addr & 3) * 8;
u32 mask = 0xFFFFFFFF >> shift;
reg[value] = (reg[value] & ~mask) | ((word >> shift) & mask);
reg[value] = (signed long)((reg[value] & ~mask) | ((word >> shift) & mask));
}
break;
case MEM_LDL:
@ -349,6 +367,8 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
}
break;
case MEM_SW:
addr &= ~3;
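/* fall through: the aligned SW case reuses the USW store loop below */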
case MEM_USW:
for(i = 0; i < count; i++){
address = addr + i*4;
word = reg[value + i];
@ -357,6 +377,7 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
invalidate_func(addr);
break;
case MEM_SH:
addr &= ~1;
for(i = 0; i < count; i++){
address = addr + i*2;
hword = reg[value + i];
@ -373,6 +394,8 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
invalidate_func(addr);
break;
case MEM_SD:
addr &= ~7;
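/* fall through: the aligned SD case reuses the USD store loop below */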
case MEM_USD:
for(i = 0; i < count; i++){
address = addr + i*8;
dword = reg[value + i];
@ -381,6 +404,7 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
invalidate_func(addr);
break;
case MEM_SWC1:
addr &= ~3;
for(i = 0; i < count; i++){
address = addr + i*4;
word = *((long*)reg_cop1_simple[value + i*2]);
@ -389,6 +413,7 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
invalidate_func(addr);
break;
case MEM_SDC1:
addr &= ~7;
for(i = 0; i < count; i++){
address = addr + i*8;
dword = *((long long*)reg_cop1_double[value + i*2]);
@ -398,7 +423,7 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
break;
case MEM_SC:
if(llbit){
address = addr;
address = addr &= ~3;
word = reg[value];
write_word_in_memory();
invalidate_func(addr);
@ -406,6 +431,16 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
reg[value] = !!llbit;
llbit = 0;
break;
case MEM_SCD:
if(llbit){
address = addr &= ~7;
dword = reg[value];
write_dword_in_memory();
invalidate_func(addr);
}
reg[value] = !!llbit;
llbit = 0;
break;
case MEM_SWL:
address = addr & ~3;
read_word_in_memory();
@ -459,7 +494,7 @@ unsigned int dyna_mem(unsigned int addr, unsigned int value, int count,
invalidate_func(addr);
break;
default:
stop = 1;
abort();
break;
}
delay_slot = 0;


@ -43,11 +43,13 @@ extern double* reg_cop1_double[32] __attribute__((section(".sbss")));
extern int noCheckInterrupt;
typedef enum { MEM_LW, MEM_LH, MEM_LB, MEM_LD,
MEM_ULW, MEM_ULD,
MEM_LWU, MEM_LHU, MEM_LBU,
MEM_LWC1, MEM_LDC1, MEM_LL,
MEM_LWC1, MEM_LDC1, MEM_LL, MEM_LLD,
MEM_LWL, MEM_LWR, MEM_LDL, MEM_LDR,
MEM_SW, MEM_SH, MEM_SB, MEM_SD,
MEM_SWC1, MEM_SDC1, MEM_SC,
MEM_USW, MEM_USD,
MEM_SWC1, MEM_SDC1, MEM_SC, MEM_SCD,
MEM_SWL, MEM_SWR, MEM_SDL, MEM_SDR } memType;
void dynarec(unsigned int address);


@ -1017,8 +1017,22 @@ void alist_iirf(
count -= 0x10;
} while (count > 0);
dram_store_u16(hle, (uint16_t*)&frame[6], address + 4, 4);
dram_store_u16(hle, (uint16_t*)&ibuf[(index-2)&3], address+8, 2);
dram_store_u16(hle, (uint16_t*)&ibuf[(index-1)&3], address+10, 2);
dram_store_u16(hle, (uint16_t*)&frame[6], address + 4, 2);
dram_store_u16(hle, (uint16_t*)&ibuf[(index-2)&3], address+8, 1);
dram_store_u16(hle, (uint16_t*)&ibuf[(index-1)&3], address+10, 1);
}
/* Perform a clamped gain, then attenuate it back by an amount */
void alist_overload(struct hle_t* hle, uint16_t dmem, int16_t count, int16_t gain, uint16_t attenuation)
{
int16_t accu;
int16_t * sample = (int16_t*)(hle->alist_buffer + dmem);
while (count != 0)
{
accu = clamp_s16(*sample * gain);
*sample = (accu * attenuation) >> 16;
sample++;
count --;
}
}
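A worked example of the fixed-point math above (values hypothetical; attenuation acts as a Q0.16 scale):

/* gain = 2, attenuation = 0x8000 (~0.5):
 *   quiet sample 12288 -> 24576, no clip -> (24576*0x8000)>>16 == 12288 (unchanged)
 *   loud  sample 28672 -> 57344, clamped to 32767 -> (32767*0x8000)>>16 == 16383
 * Loud samples are hard-clipped by the gain, then everything is scaled back down:
 * a classic overdrive/distortion shape. */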


@ -150,6 +150,13 @@ void alist_iirf(
int16_t* table,
uint32_t address);
void alist_overload(
struct hle_t* hle,
uint16_t dmem,
int16_t count,
int16_t gain,
uint16_t attenuation);
/*
* Audio flags
*/


@ -263,6 +263,16 @@ static void MP3(struct hle_t* hle, uint32_t w1, uint32_t w2)
mp3_task(hle, index, address);
}
static void OVERLOAD(struct hle_t* hle, uint32_t w1, uint32_t w2)
{
/* Overload distortion effect for Conker's Bad Fur Day */
uint16_t dmem = (w1 & 0xfff) + NAUDIO_MAIN;
int16_t gain = (int16_t)(uint16_t)w2;
uint16_t attenuation = w2 >> 16;
alist_overload(hle, dmem, NAUDIO_COUNT, gain, attenuation);
}
/* global functions */
void alist_process_naudio(struct hle_t* hle)
{
@ -308,7 +318,7 @@ void alist_process_naudio_dk(struct hle_t* hle)
void alist_process_naudio_mp3(struct hle_t* hle)
{
static const acmd_callback_t ABI[0x10] = {
UNKNOWN, ADPCM, CLEARBUFF, ENVMIXER,
OVERLOAD, ADPCM, CLEARBUFF, ENVMIXER,
LOADBUFF, RESAMPLE, SAVEBUFF, MP3,
MP3ADDY, SETVOL, DMEMMOVE, LOADADPCM,
MIXER, INTERLEAVE, NAUDIO_14, SETLOOP
@ -320,9 +330,20 @@ void alist_process_naudio_mp3(struct hle_t* hle)
void alist_process_naudio_cbfd(struct hle_t* hle)
{
/* TODO: see what differs from alist_process_naudio_mp3 */
/* What differs from alist_process_naudio_mp3?
*
* JoshW: It appears that, despite being a newer game, CBFD has a slightly older ucode version
* compared to JFG, B.T. et al.
* For naudio_mp3, the functions' DMEM parameters have an additional protective AND on them
* (basically dmem & 0xffff).
* There are also minor differences in the RESAMPLE and ENVMIXER functions.
* I don't think they make any noticeable difference; it could just be a simplification of the logic.
*
* bsmiles32: The only difference I could remember between mp3 and cbfd variants is in the MP3ADDY command.
* And the MP3 overlay is also different.
*/
static const acmd_callback_t ABI[0x10] = {
UNKNOWN, ADPCM, CLEARBUFF, ENVMIXER,
OVERLOAD, ADPCM, CLEARBUFF, ENVMIXER,
LOADBUFF, RESAMPLE, SAVEBUFF, MP3,
MP3ADDY, SETVOL, DMEMMOVE, LOADADPCM,
MIXER, INTERLEAVE, NAUDIO_14, SETLOOP


@ -114,7 +114,9 @@ void hle_execute(struct hle_t* hle)
bool match = false;
struct cached_ucodes_t * cached_ucodes = &hle->cached_ucodes;
struct ucode_info_t *info = &cached_ucodes->infos[cached_ucodes->count-1];
struct ucode_info_t *info = NULL;
if (cached_ucodes->count > 0)
info = &cached_ucodes->infos[cached_ucodes->count-1];
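/* info now points at the most recent cache entry, or stays NULL while the
 * ucode cache is empty (previously this read infos[-1] on the first run) */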
for (int i = 0; i < cached_ucodes->count; i++)
{
if (info->uc_start == uc_start && info->uc_dstart == uc_dstart && info->uc_dsize == uc_dsize)