mirror of
https://github.com/Inori/GPCS4.git
synced 2024-06-01 19:08:08 -04:00
detile texture
not finished
This commit is contained in:
parent
e643efa965
commit
405c4f6871
|
@ -5,6 +5,7 @@
|
|||
|
||||
#include "Violet/VltDevice.h"
|
||||
#include "Violet/VltContext.h"
|
||||
#include "PlatMemory.h"
|
||||
|
||||
using namespace sce::vlt;
|
||||
|
||||
|
@ -96,11 +97,20 @@ namespace sce::Gnm
|
|||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
auto formatInfo = imageFormatInfo(image->info().format);
|
||||
auto textureFormat = tsharp->getDataFormat();
|
||||
uint32_t bytesPerElement = textureFormat.getTotalBytesPerElement();
|
||||
bool isCompressed = textureFormat.isBlockCompressedFormat();
|
||||
const uint8_t* textureMem = reinterpret_cast<uint8_t*>(tsharp->getBaseAddress());
|
||||
auto formatInfo = imageFormatInfo(image->info().format);
|
||||
auto textureFormat = tsharp->getDataFormat();
|
||||
uint32_t bytesPerElement = textureFormat.getTotalBytesPerElement();
|
||||
bool isCompressed = textureFormat.isBlockCompressedFormat();
|
||||
auto tileMode = tsharp->getTileMode();
|
||||
uint8_t* textureMem = reinterpret_cast<uint8_t*>(tsharp->getBaseAddress());
|
||||
|
||||
if (image->info().extent.width == 256 && image->info().extent.height == 16)
|
||||
{
|
||||
__debugbreak();
|
||||
}
|
||||
|
||||
bool isTiled = tileMode != kTileModeDisplay_LinearAligned &&
|
||||
tileMode != kTileModeDisplay_LinearGeneral;
|
||||
|
||||
for (uint32_t layer = 0; layer < image->info().numLayers; layer++)
|
||||
{
|
||||
|
@ -132,13 +142,32 @@ namespace sce::Gnm
|
|||
uint64_t surfaceSize = 0;
|
||||
GpuAddress::computeTextureSurfaceOffsetAndSize(
|
||||
&surfaceOffset, &surfaceSize, tsharp, level, layer);
|
||||
const void* memory = textureMem + surfaceOffset;
|
||||
void* memory = textureMem + surfaceOffset;
|
||||
|
||||
if (isTiled)
|
||||
{
|
||||
// allocate enough memory
|
||||
uint64_t untiledSize = 0;
|
||||
AlignmentType align = 0;
|
||||
GpuAddress::computeUntiledSurfaceSize(&untiledSize, &align, ¶ms);
|
||||
void* untiled = plat::aligned_malloc(align, untiledSize);
|
||||
// detile surface
|
||||
// TODO:
|
||||
// Should be done on GPU using compute shader
|
||||
detileSurface(untiled, memory, ¶ms);
|
||||
memory = untiled;
|
||||
}
|
||||
|
||||
m_context->uploadImage(
|
||||
image, subresourceLayers,
|
||||
memory,
|
||||
pitchInBytes,
|
||||
surfaceInfo.m_surfaceSize);
|
||||
|
||||
if (isTiled)
|
||||
{
|
||||
plat::aligned_free(memory);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
|
@ -2,233 +2,230 @@
|
|||
|
||||
LOG_CHANNEL(Platform.UtilMemory);
|
||||
|
||||
namespace plat
|
||||
{
|
||||
|
||||
|
||||
|
||||
#ifdef GPCS4_WINDOWS
|
||||
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <Windows.h>
|
||||
#undef WIN32_LEAN_AND_MEAN
|
||||
#else
|
||||
|
||||
// GPCS4 flag to Windows flag
|
||||
inline uint32_t GetProtectFlag(VM_PROTECT_FLAG nOldFlag)
|
||||
{
|
||||
uint32_t nNewFlag = 0;
|
||||
do
|
||||
{
|
||||
if (nOldFlag & VMPF_NOACCESS)
|
||||
{
|
||||
nNewFlag = PAGE_NOACCESS;
|
||||
break;
|
||||
}
|
||||
|
||||
if (nOldFlag & VMPF_CPU_READ)
|
||||
{
|
||||
nNewFlag = PAGE_READONLY;
|
||||
}
|
||||
|
||||
if (nOldFlag & VMPF_CPU_WRITE)
|
||||
{
|
||||
nNewFlag = PAGE_READWRITE;
|
||||
}
|
||||
|
||||
if (nOldFlag & VMPF_CPU_EXEC)
|
||||
{
|
||||
nNewFlag = PAGE_EXECUTE_READWRITE;
|
||||
}
|
||||
|
||||
} while (false);
|
||||
return nNewFlag;
|
||||
}
|
||||
|
||||
// Windows flag to GPCS4 flag
|
||||
inline VM_PROTECT_FLAG RecoverProtectFlag(uint32_t nOldFlag)
|
||||
{
|
||||
uint32_t nNewFlag = 0;
|
||||
do
|
||||
{
|
||||
if (nOldFlag & PAGE_NOACCESS)
|
||||
{
|
||||
nNewFlag = VMPF_NOACCESS;
|
||||
break;
|
||||
}
|
||||
|
||||
if (nOldFlag & PAGE_READONLY)
|
||||
{
|
||||
nNewFlag |= VMPF_CPU_READ;
|
||||
}
|
||||
|
||||
if (nOldFlag & PAGE_READWRITE)
|
||||
{
|
||||
nNewFlag |= VMPF_CPU_WRITE;
|
||||
}
|
||||
|
||||
if ((nOldFlag & PAGE_EXECUTE) ||
|
||||
(nOldFlag & PAGE_EXECUTE_READ) ||
|
||||
(nOldFlag & PAGE_EXECUTE_READWRITE))
|
||||
{
|
||||
nNewFlag |= VMPF_CPU_EXEC;
|
||||
}
|
||||
|
||||
} while (false);
|
||||
return static_cast<VM_PROTECT_FLAG>(nNewFlag);
|
||||
}
|
||||
|
||||
// Map a Windows region state (MEM_COMMIT / MEM_RESERVE / MEM_FREE)
// onto the GPCS4 VM_REGION_STATE enum. Unknown states report as free.
VM_REGION_STATE GetRegionState(uint32_t nOldState)
{
	switch (nOldState)
	{
	case MEM_COMMIT:
		return VMRS_COMMIT;
	case MEM_RESERVE:
		return VMRS_RESERVE;
	case MEM_FREE:
	default:
		return VMRS_FREE;
	}
}
|
||||
|
||||
// Translate GPCS4 allocation-type bits into the Windows MEM_* mask
// accepted by VirtualAlloc.
inline uint32_t GetTypeFlag(VM_ALLOCATION_TYPE nOldFlag)
{
	uint32_t winType = 0;
	if (nOldFlag & VMAT_RESERVE)
	{
		winType |= MEM_RESERVE;
	}
	if (nOldFlag & VMAT_COMMIT)
	{
		winType |= MEM_COMMIT;
	}
	return winType;
}
|
||||
|
||||
void* VMAllocate(void* pAddress, size_t nSize,
|
||||
VM_ALLOCATION_TYPE nType, VM_PROTECT_FLAG nProtect)
|
||||
{
|
||||
DWORD dwType = GetTypeFlag(nType);
|
||||
DWORD dwProtect = GetProtectFlag(nProtect);
|
||||
|
||||
return VirtualAlloc(pAddress, nSize, dwType, dwProtect);
|
||||
}
|
||||
|
||||
void* VMAllocateAlign(void* pAddress, size_t nSize, size_t nAlign,
|
||||
VM_ALLOCATION_TYPE nType, VM_PROTECT_FLAG nProtect)
|
||||
{
|
||||
|
||||
#ifdef GPCS4_DEBUG
|
||||
// This makes it easier to find a function in Ida Pro.
|
||||
// When aligned with debugAlign, the loaded image address
|
||||
// is exactly the same as it in Ida Pro.
|
||||
// For example, without this debug align,
|
||||
// a function address may be 0x00000002b0240e21,
|
||||
// but with this align, you can search for sub_240e21
|
||||
// in Ida directly.
|
||||
const uint32_t debugAlign = 0x10000000;
|
||||
nAlign = debugAlign;
|
||||
#endif
|
||||
|
||||
void* pAlignedAddr = nullptr;
|
||||
do
|
||||
namespace plat
|
||||
{
|
||||
|
||||
#ifdef GPCS4_WINDOWS
|
||||
// GPCS4 flag to Windows flag
|
||||
inline uint32_t GetProtectFlag(VM_PROTECT_FLAG nOldFlag)
|
||||
{
|
||||
DWORD dwProtect = GetProtectFlag(nProtect);
|
||||
void* pAddr = VirtualAlloc(pAddress, nSize, MEM_RESERVE, dwProtect);
|
||||
uintptr_t pRefAddr = util::align((uintptr_t)pAddr, nAlign);
|
||||
|
||||
if (pAddr)
|
||||
{
|
||||
VirtualFree(pAddr, 0, MEM_RELEASE);
|
||||
}
|
||||
else
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
uint32_t nNewFlag = 0;
|
||||
do
|
||||
{
|
||||
pAlignedAddr = VirtualAlloc((void*)pRefAddr, nSize, MEM_RESERVE | MEM_COMMIT, dwProtect);
|
||||
pRefAddr += nAlign;
|
||||
} while (pAlignedAddr == nullptr);
|
||||
if (nOldFlag & VMPF_NOACCESS)
|
||||
{
|
||||
nNewFlag = PAGE_NOACCESS;
|
||||
break;
|
||||
}
|
||||
|
||||
} while (false);
|
||||
if (nOldFlag & VMPF_CPU_READ)
|
||||
{
|
||||
nNewFlag = PAGE_READONLY;
|
||||
}
|
||||
|
||||
return pAlignedAddr;
|
||||
if (nOldFlag & VMPF_CPU_WRITE)
|
||||
{
|
||||
nNewFlag = PAGE_READWRITE;
|
||||
}
|
||||
|
||||
}
|
||||
if (nOldFlag & VMPF_CPU_EXEC)
|
||||
{
|
||||
nNewFlag = PAGE_EXECUTE_READWRITE;
|
||||
}
|
||||
|
||||
void VMFree(void* pAddress)
|
||||
{
|
||||
// MSDN:
|
||||
// If the dwFreeType parameter is MEM_RELEASE, this parameter(dwSize) must be 0(zero).
|
||||
// The function frees the entire region that is reserved in the initial allocation call to VirtualAlloc.
|
||||
VirtualFree(pAddress, 0, MEM_RELEASE);
|
||||
}
|
||||
|
||||
bool VMProtect(void* pAddress, size_t nSize,
|
||||
VM_PROTECT_FLAG nNewProtect, VM_PROTECT_FLAG* pOldProtect)
|
||||
{
|
||||
DWORD dwNewProtect = GetProtectFlag(nNewProtect);
|
||||
DWORD dwOldProtect = 0;
|
||||
BOOL bSuc = VirtualProtect(pAddress, nSize, dwNewProtect, &dwOldProtect);
|
||||
if (pOldProtect)
|
||||
{
|
||||
*pOldProtect = RecoverProtectFlag(dwOldProtect);
|
||||
} while (false);
|
||||
return nNewFlag;
|
||||
}
|
||||
return bSuc;
|
||||
}
|
||||
|
||||
bool VMQuery(void* pAddress, MemoryInformation* pInfo)
|
||||
{
|
||||
bool ret = false;
|
||||
do
|
||||
// Windows flag to GPCS4 flag
|
||||
inline VM_PROTECT_FLAG RecoverProtectFlag(uint32_t nOldFlag)
|
||||
{
|
||||
MEMORY_BASIC_INFORMATION mbi = {};
|
||||
if (VirtualQuery(pAddress, &mbi, sizeof(mbi)) == 0)
|
||||
uint32_t nNewFlag = 0;
|
||||
do
|
||||
{
|
||||
break;
|
||||
}
|
||||
if (nOldFlag & PAGE_NOACCESS)
|
||||
{
|
||||
nNewFlag = VMPF_NOACCESS;
|
||||
break;
|
||||
}
|
||||
|
||||
pInfo->pRegionStart = mbi.BaseAddress;
|
||||
pInfo->nRegionSize = mbi.RegionSize;
|
||||
pInfo->nRegionState = GetRegionState(mbi.State);
|
||||
pInfo->nRegionProtect = RecoverProtectFlag(mbi.Protect);
|
||||
if (nOldFlag & PAGE_READONLY)
|
||||
{
|
||||
nNewFlag |= VMPF_CPU_READ;
|
||||
}
|
||||
|
||||
ret = true;
|
||||
} while (false);
|
||||
return ret;
|
||||
}
|
||||
if (nOldFlag & PAGE_READWRITE)
|
||||
{
|
||||
nNewFlag |= VMPF_CPU_WRITE;
|
||||
}
|
||||
|
||||
void* aligned_malloc(size_t align, size_t size)
|
||||
{
|
||||
_set_errno(0);
|
||||
void* ret = _aligned_malloc(size, align);
|
||||
if (errno == ENOMEM)
|
||||
{
|
||||
ret = nullptr;
|
||||
if ((nOldFlag & PAGE_EXECUTE) ||
|
||||
(nOldFlag & PAGE_EXECUTE_READ) ||
|
||||
(nOldFlag & PAGE_EXECUTE_READWRITE))
|
||||
{
|
||||
nNewFlag |= VMPF_CPU_EXEC;
|
||||
}
|
||||
|
||||
} while (false);
|
||||
return static_cast<VM_PROTECT_FLAG>(nNewFlag);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void aligned_free(void* ptr)
|
||||
{
|
||||
_aligned_free(ptr);
|
||||
}
|
||||
// Convert the State field reported by VirtualQuery into the GPCS4
// region-state enum.
VM_REGION_STATE GetRegionState(uint32_t nOldState)
{
	// Anything other than MEM_COMMIT / MEM_RESERVE (i.e. MEM_FREE or an
	// unexpected value) reports as free.
	if (nOldState == MEM_COMMIT)
		return VMRS_COMMIT;
	if (nOldState == MEM_RESERVE)
		return VMRS_RESERVE;
	return VMRS_FREE;
}
|
||||
|
||||
// Build the Windows allocation-type mask (MEM_*) from GPCS4 bits.
inline uint32_t GetTypeFlag(VM_ALLOCATION_TYPE nOldFlag)
{
	const uint32_t reservePart = (nOldFlag & VMAT_RESERVE) ? MEM_RESERVE : 0;
	const uint32_t commitPart  = (nOldFlag & VMAT_COMMIT) ? MEM_COMMIT : 0;
	return reservePart | commitPart;
}
|
||||
|
||||
void* VMAllocate(void* pAddress, size_t nSize, VM_ALLOCATION_TYPE nType, VM_PROTECT_FLAG nProtect)
|
||||
{
|
||||
DWORD dwType = GetTypeFlag(nType);
|
||||
DWORD dwProtect = GetProtectFlag(nProtect);
|
||||
|
||||
return VirtualAlloc(pAddress, nSize, dwType, dwProtect);
|
||||
}
|
||||
|
||||
// Allocate nSize bytes whose base address is aligned to nAlign.
// Strategy: reserve a probe region to learn where free address space is,
// release it, then retry reservations at successive nAlign-aligned
// addresses until one succeeds.
//
// NOTE(review): nType is accepted but not used by this implementation —
// the final allocation is always MEM_RESERVE | MEM_COMMIT; confirm
// whether that is intentional.
// NOTE(review): there is a race between VirtualFree of the probe region
// and the aligned re-reservation: another thread may grab the freed
// range first, in which case the retry loop walks upward by nAlign.
// The inner loop has no upper bound, so exhausted address space would
// spin rather than fail.
void* VMAllocateAlign(void* pAddress, size_t nSize, size_t nAlign, VM_ALLOCATION_TYPE nType, VM_PROTECT_FLAG nProtect)
{

#ifdef GPCS4_DEBUG
	// This makes it easier to find a function in Ida Pro.
	// When aligned with debugAlign, the loaded image address
	// is exactly the same as it is in Ida Pro.
	// For example, without this debug align,
	// a function address may be 0x00000002b0240e21,
	// but with this align, you can search for sub_240e21
	// in Ida directly.
	const uint32_t debugAlign = 0x10000000;
	nAlign = debugAlign;
#endif

	void* pAlignedAddr = nullptr;
	do
	{
		DWORD dwProtect = GetProtectFlag(nProtect);
		// Probe: reserve (not commit) to discover an available range.
		void* pAddr = VirtualAlloc(pAddress, nSize, MEM_RESERVE, dwProtect);
		// First aligned candidate at or above the probed address.
		uintptr_t pRefAddr = util::align((uintptr_t)pAddr, nAlign);

		if (pAddr)
		{
			// Release the probe so the aligned reservation can reuse the range.
			VirtualFree(pAddr, 0, MEM_RELEASE);
		}
		else
		{
			break;
		}

		// Walk upward in nAlign steps until a reservation sticks.
		do
		{
			pAlignedAddr = VirtualAlloc((void*)pRefAddr, nSize, MEM_RESERVE | MEM_COMMIT, dwProtect);
			pRefAddr += nAlign;
		} while (pAlignedAddr == nullptr);

	} while (false);

	return pAlignedAddr;
}
|
||||
|
||||
// Release an entire region previously obtained via VMAllocate /
// VMAllocateAlign. pAddress must be the base address returned by the
// original reservation.
void VMFree(void* pAddress)
{
	// MSDN:
	// If the dwFreeType parameter is MEM_RELEASE, this parameter(dwSize) must be 0(zero).
	// The function frees the entire region that is reserved in the initial allocation call to VirtualAlloc.
	VirtualFree(pAddress, 0, MEM_RELEASE);
}
|
||||
|
||||
bool VMProtect(void* pAddress, size_t nSize, VM_PROTECT_FLAG nNewProtect, VM_PROTECT_FLAG* pOldProtect)
|
||||
{
|
||||
DWORD dwNewProtect = GetProtectFlag(nNewProtect);
|
||||
DWORD dwOldProtect = 0;
|
||||
BOOL bSuc = VirtualProtect(pAddress, nSize, dwNewProtect, &dwOldProtect);
|
||||
if (pOldProtect)
|
||||
{
|
||||
*pOldProtect = RecoverProtectFlag(dwOldProtect);
|
||||
}
|
||||
return bSuc;
|
||||
}
|
||||
|
||||
bool VMQuery(void* pAddress, MemoryInformation* pInfo)
|
||||
{
|
||||
bool ret = false;
|
||||
do
|
||||
{
|
||||
MEMORY_BASIC_INFORMATION mbi = {};
|
||||
if (VirtualQuery(pAddress, &mbi, sizeof(mbi)) == 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
pInfo->pRegionStart = mbi.BaseAddress;
|
||||
pInfo->nRegionSize = mbi.RegionSize;
|
||||
pInfo->nRegionState = GetRegionState(mbi.State);
|
||||
pInfo->nRegionProtect = RecoverProtectFlag(mbi.Protect);
|
||||
|
||||
ret = true;
|
||||
} while (false);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void* aligned_malloc(size_t align, size_t size)
|
||||
{
|
||||
_set_errno(0);
|
||||
void* ret = _aligned_malloc(size, align);
|
||||
if (errno == ENOMEM)
|
||||
{
|
||||
ret = nullptr;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Release a block obtained from aligned_malloc().
// _aligned_free is documented to accept a null pointer as a no-op.
void aligned_free(void* ptr)
{
	_aligned_free(ptr);
}
|
||||
|
||||
#elif defined(GPCS4_LINUX)
|
||||
|
||||
//TODO: Other platform implementation
|
||||
// TODO: Other platform implementation
|
||||
|
||||
#endif //GPCS4_WINDOWS
|
||||
#endif // GPCS4_WINDOWS
|
||||
|
||||
}
|
||||
} // namespace plat
|
|
@ -3,83 +3,77 @@
|
|||
|
||||
#include <memory>
|
||||
|
||||
// Virtual memory stuff
|
||||
// NOTE:
|
||||
// these functions are just platform abstraction,
|
||||
// and shouldn't be called from HLE functions directly,
|
||||
// instead, they should be used by emulator code.
|
||||
// For HLE use, see Memory.h
|
||||
// Virtual memory stuffs
|
||||
// NOTE:
|
||||
// these functions are just platform abstraction,
|
||||
// and shouldn't be called from HLE functions directly,
|
||||
// instead, they should be used by emulator code.
|
||||
// For HLE use, see Memory.h
|
||||
|
||||
namespace plat
|
||||
{
|
||||
|
||||
constexpr uint32_t VM_PAGE_SIZE = 0x1000;
|
||||
constexpr uint32_t VM_PAGE_SIZE = 0x1000;
|
||||
|
||||
|
||||
// Platform-neutral page protection flags (bit mask).
enum VM_PROTECT_FLAG
{
	VMPF_NOACCESS = 0x00000001,  // no access at all; overrides other bits
	VMPF_CPU_READ = 0x00000002,
	VMPF_CPU_WRITE = 0x00000004,
	VMPF_CPU_EXEC = 0x00000008,
	// Convenience combinations.
	VMPF_CPU_RW = VMPF_CPU_READ | VMPF_CPU_WRITE,
	VMPF_CPU_RWX = VMPF_CPU_READ | VMPF_CPU_WRITE | VMPF_CPU_EXEC,
};

// How address space is claimed (bit mask): reserve only, commit only,
// or both in one call.
enum VM_ALLOCATION_TYPE
{
	VMAT_RESERVE = 0x00000001,
	VMAT_COMMIT = 0x00000010,
	VMAT_RESERVE_COMMIT = VMAT_RESERVE | VMAT_COMMIT
};

// State of a queried memory region (see VMQuery).
enum VM_REGION_STATE
{
	VMRS_FREE,
	VMRS_COMMIT,
	VMRS_RESERVE,
};

// Result record filled by VMQuery for the region containing an address.
struct MemoryInformation
{
	void* pRegionStart;             // base address of the region
	size_t nRegionSize;             // size of the region in bytes
	VM_REGION_STATE nRegionState;   // free / reserved / committed
	VM_PROTECT_FLAG nRegionProtect; // current protection, GPCS4 flags
};
|
||||
|
||||
void* VMAllocate(void* pAddress, size_t nSize,
|
||||
VM_ALLOCATION_TYPE nType, VM_PROTECT_FLAG nProtect);
|
||||
|
||||
void* VMAllocateAlign(void* pAddress, size_t nSize, size_t nAlign,
|
||||
VM_ALLOCATION_TYPE nType, VM_PROTECT_FLAG nProtect);
|
||||
|
||||
void VMFree(void* pAddress);
|
||||
|
||||
bool VMProtect(void* pAddress, size_t nSize,
|
||||
VM_PROTECT_FLAG nNewProtect, VM_PROTECT_FLAG* pOldProtect = nullptr);
|
||||
|
||||
bool VMQuery(void* pAddress, MemoryInformation* pInfo);
|
||||
|
||||
struct MemoryUnMapper
|
||||
{
|
||||
void operator()(void* pMem) const noexcept
|
||||
enum VM_PROTECT_FLAG
|
||||
{
|
||||
if (pMem != nullptr)
|
||||
VMPF_NOACCESS = 0x00000001,
|
||||
VMPF_CPU_READ = 0x00000002,
|
||||
VMPF_CPU_WRITE = 0x00000004,
|
||||
VMPF_CPU_EXEC = 0x00000008,
|
||||
VMPF_CPU_RW = VMPF_CPU_READ | VMPF_CPU_WRITE,
|
||||
VMPF_CPU_RWX = VMPF_CPU_READ | VMPF_CPU_WRITE | VMPF_CPU_EXEC,
|
||||
};
|
||||
|
||||
enum VM_ALLOCATION_TYPE
|
||||
{
|
||||
VMAT_RESERVE = 0x00000001,
|
||||
VMAT_COMMIT = 0x00000010,
|
||||
VMAT_RESERVE_COMMIT = VMAT_RESERVE | VMAT_COMMIT
|
||||
};
|
||||
|
||||
enum VM_REGION_STATE
|
||||
{
|
||||
VMRS_FREE,
|
||||
VMRS_COMMIT,
|
||||
VMRS_RESERVE,
|
||||
};
|
||||
|
||||
struct MemoryInformation
|
||||
{
|
||||
void* pRegionStart;
|
||||
size_t nRegionSize;
|
||||
VM_REGION_STATE nRegionState;
|
||||
VM_PROTECT_FLAG nRegionProtect;
|
||||
};
|
||||
|
||||
void* VMAllocate(void* pAddress, size_t nSize, VM_ALLOCATION_TYPE nType, VM_PROTECT_FLAG nProtect);
|
||||
|
||||
void* VMAllocateAlign(void* pAddress, size_t nSize, size_t nAlign, VM_ALLOCATION_TYPE nType, VM_PROTECT_FLAG nProtect);
|
||||
|
||||
void VMFree(void* pAddress);
|
||||
|
||||
bool VMProtect(void* pAddress, size_t nSize, VM_PROTECT_FLAG nNewProtect, VM_PROTECT_FLAG* pOldProtect = nullptr);
|
||||
|
||||
bool VMQuery(void* pAddress, MemoryInformation* pInfo);
|
||||
|
||||
struct MemoryUnMapper
|
||||
{
|
||||
void operator()(void* pMem) const noexcept
|
||||
{
|
||||
VMFree(pMem);
|
||||
if (pMem != nullptr)
|
||||
{
|
||||
VMFree(pMem);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
// auto release smart memory pointer
|
||||
typedef std::unique_ptr<uint8_t, MemoryUnMapper> memory_ptr;
|
||||
// auto release smart memory pointer
|
||||
typedef std::unique_ptr<uint8_t, MemoryUnMapper> memory_ptr;
|
||||
|
||||
// aligned malloc and free
|
||||
void* aligned_malloc(size_t align, size_t size);
|
||||
|
||||
// aligned malloc and free
|
||||
void* aligned_malloc(size_t align, size_t size);
|
||||
void aligned_free(void* ptr);
|
||||
|
||||
void aligned_free(void* ptr);
|
||||
|
||||
|
||||
}
|
||||
} // namespace plat
|
Loading…
Reference in a new issue