ARM differentiates between architectures for memory alignment

ARM7TDMI (which is an ARMv4 implementation) does not allow misaligned
addressing at all. ARMv7, meanwhile, does allow it for some instructions.

ARM emulation no longer logs misaligned addresses, but it does forward
the information to the developer interface.
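A front end can then read the misaligned accesses from the fault log through
that interface. The sketch below is illustrative and not part of this commit:
dumpFaults and the developer import path are assumptions, while BorrowFaults,
WriteLog and Clear are the functions changed or added in the diff below.

    package example

    import (
        "io"

        "github.com/jetsetilly/gopher2600/coprocessor/developer"
        "github.com/jetsetilly/gopher2600/coprocessor/developer/faults"
    )

    // dumpFaults is a hypothetical helper showing how forwarded fault
    // information (misaligned accesses included) can be read by a front
    // end. BorrowFaults holds the faults lock for the duration of the
    // supplied function
    func dumpFaults(dev *developer.Developer, w io.Writer) {
        dev.BorrowFaults(func(flt *faults.Faults) {
            // entries are written in the order they were added
            flt.WriteLog(w)

            // the equivalent of the new Clear button in the Faults window
            flt.Clear()
        })
    }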

Faults window now has a Clear button and a fixed table header
JetSetIlly 2024-04-08 13:55:25 +01:00
parent 5b759fa0bf
commit 110d476389
7 changed files with 68 additions and 34 deletions


@ -70,8 +70,8 @@ func (dev *Developer) BorrowYieldState(f func(yield.State)) {
// BorrowFaults will lock the illegal access log for the duration of the
// supplied function, which will be executed with the illegal access log as an
// argument.
func (dev *Developer) BorrowFaults(f func(faults.Faults)) {
func (dev *Developer) BorrowFaults(f func(*faults.Faults)) {
dev.faultsLock.Lock()
defer dev.faultsLock.Unlock()
f(dev.faults)
f(&dev.faults)
}


@ -25,10 +25,11 @@ type Category string
// List of valid Category values
const (
NullDereference Category = "null dereference"
StackCollision Category = "stack collision"
IllegalAddress Category = "illegal address"
UndefinedSymbol Category = "undefined symbol"
NullDereference Category = "null dereference"
MisalignedAccess Category = "misaligned access"
StackCollision Category = "stack collision"
IllegalAddress Category = "illegal address"
UndefinedSymbol Category = "undefined symbol"
)
// Entry is a single entry in the fault log
@ -73,6 +74,12 @@ func NewFaults() Faults {
}
}
// Clear all entries from faults log. Does not clear the HasStackCollision flag
func (flt *Faults) Clear() {
clear(flt.entries)
flt.Log = flt.Log[:0]
}
// WriteLog writes the list of faults in the order they were added
func (flt Faults) WriteLog(w io.Writer) {
for _, e := range flt.Log {


@ -1437,7 +1437,7 @@ func (dbg *Debugger) processTokens(tokens *commandline.Tokens) error {
switch arg {
case "FAULTS":
dbg.CoProcDev.BorrowFaults(func(flt faults.Faults) {
dbg.CoProcDev.BorrowFaults(func(flt *faults.Faults) {
w := dbg.writerInStyle(terminal.StyleFeedback)
flt.WriteLog(w)
})


@ -68,7 +68,7 @@ func (win *winCoProcFaults) debuggerDraw() bool {
title := fmt.Sprintf("%s %s", coproc.ProcessorID(), winCoProcFaultsID)
if imgui.BeginV(win.debuggerID(title), &win.debuggerOpen, imgui.WindowFlagsNone) {
win.img.dbg.CoProcDev.BorrowFaults(func(flt faults.Faults) {
win.img.dbg.CoProcDev.BorrowFaults(func(flt *faults.Faults) {
win.img.dbg.CoProcDev.BorrowSource(func(src *dwarf.Source) {
win.draw(flt, src)
})
@ -81,7 +81,7 @@ func (win *winCoProcFaults) debuggerDraw() bool {
return true
}
func (win *winCoProcFaults) draw(flt faults.Faults, src *dwarf.Source) {
func (win *winCoProcFaults) draw(flt *faults.Faults, src *dwarf.Source) {
// hasStackCollision to decide whether to issue warning in footer
hasStackCollision := false
@ -113,7 +113,7 @@ func (win *winCoProcFaults) draw(flt faults.Faults, src *dwarf.Source) {
imgui.TableSetupColumnV("Function", imgui.TableColumnFlagsNone, width*0.35, 3)
}
imgui.Spacing()
imgui.TableSetupScrollFreeze(0, 1)
imgui.TableHeadersRow()
for i := 0; i < len(flt.Log); i++ {
@ -216,10 +216,15 @@ func (win *winCoProcFaults) draw(flt faults.Faults, src *dwarf.Source) {
if src != nil {
imgui.Checkbox("Show Source in Tooltip", &win.showSrcInTooltip)
imgui.SameLineV(0, 20)
}
imgui.SameLineV(0, 20)
if imgui.Button("Clear") {
flt.Clear()
}
if hasStackCollision {
imgui.SameLineV(0, 20)
imgui.PushStyleColor(imgui.StyleColorText, win.img.cols.Warning)
imgui.AlignTextToFramePadding()
imgui.Text(fmt.Sprintf(" %c", fonts.Warning))
@ -231,9 +236,6 @@ func (win *winCoProcFaults) draw(flt faults.Faults, src *dwarf.Source) {
imgui.Text("Results of memory access is unreliable after a stack collision")
imgui.Text("and so memory faults are no longer being logged.")
}, true)
} else {
// empty call to imgui.Text to consume an earlier call to imgui.SameLineV()
imgui.Text("")
}
})
}


@ -55,6 +55,9 @@ type Map struct {
CartArchitecture CartArchitecture
ARMArchitecture ARMArchitecture
// some ARM architectures allow misaligned accesses for some instructions
MisalignedAccesses bool
FlashOrigin uint32
FlashMemtop uint32
@ -116,6 +119,7 @@ func NewMap(cart CartArchitecture) Map {
case Harmony:
mmap.ARMArchitecture = ARM7TDMI
mmap.MisalignedAccesses = false
mmap.FlashOrigin = 0x00000000
mmap.FlashMemtop = 0x0fffffff
@ -141,6 +145,7 @@ func NewMap(cart CartArchitecture) Map {
case PlusCart:
mmap.ARMArchitecture = ARMv7_M
mmap.MisalignedAccesses = true
mmap.FlashOrigin = 0x20000000
mmap.FlashMemtop = 0x2fffffff


@ -932,10 +932,12 @@ func (arm *ARM) run() (coprocessor.CoProcYield, float32) {
// handle memory access yields. we don't want these to bleed out
// of the ARM unless the abort preference is set
if arm.state.yield.Type == coprocessor.YieldMemoryAccessError {
// if illegal memory accesses are to be ignored then we must log the
// yield information now before reset the yield type
// choosing not to log memory access errors. it can be far
// too noisy, particularly during the pre-execution disassembly
// stage. we could maybe improve this by indicating that we
// expect memory faults and then allowing logging during
// normal execution
if !arm.abortOnMemoryFault {
arm.logYield()
arm.resetYield()
}
}


@ -20,7 +20,6 @@ import (
"github.com/jetsetilly/gopher2600/coprocessor"
"github.com/jetsetilly/gopher2600/coprocessor/developer/faults"
"github.com/jetsetilly/gopher2600/logger"
)
func (arm *ARM) memoryFault(event string, fault faults.Category, addr uint32) {
@ -46,6 +45,11 @@ func (arm *ARM) nullAccess(event string, addr uint32) {
arm.memoryFault(event, faults.NullDereference, addr)
}
// misalignedAccess is a special condition of illegalAccess()
func (arm *ARM) misalignedAccess(event string, addr uint32) {
arm.memoryFault(event, faults.MisalignedAccess, addr)
}
func (arm *ARM) read8bit(addr uint32) uint8 {
if addr < arm.mmap.NullAccessBoundary {
arm.nullAccess("Read 8bit", addr)
@ -121,16 +125,27 @@ func (arm *ARM) write8bit(addr uint32, val uint8) {
(*mem)[idx] = val
}
// requiresAlignment should be true only for certain instructions. alignment
// behaviour given in "A63.2.1 Alignment behaviour" of "ARMv7-M"
// for 16bit and 32bit access functions, there is a parameter called
// requiresAlignment. this indicates that the instruction issuing the access
// requires the access to be aligned.
//
// if the emulated architecture does not allow misaligned addresses then an
// appropriate alignment check is always made
//
// for the ARMv7-M architecture, alignment behaviour is given in "A3.2.1
// Alignment behaviour" of the specification
func (arm *ARM) read16bit(addr uint32, requiresAlignment bool) uint16 {
if addr < arm.mmap.NullAccessBoundary {
arm.nullAccess("Read 16bit", addr)
}
// check 16 bit alignment
if requiresAlignment && addr&0x01 != 0x00 {
logger.Logf("ARM7", "misaligned 16 bit read (%08x) (PC: %08x)", addr, arm.state.instructionPC)
if (requiresAlignment || !arm.mmap.MisalignedAccesses) && !IsAlignedTo16bits(addr) {
arm.misalignedAccess("Read 16bit", addr)
if !arm.mmap.MisalignedAccesses {
addr = AlignTo16bits(addr)
}
}
mem, origin := arm.mem.MapAddress(addr, false)
@ -172,16 +187,17 @@ func (arm *ARM) read16bit(addr uint32, requiresAlignment bool) uint16 {
return arm.byteOrder.Uint16((*mem)[idx:])
}
// requiresAlignment should be true only for certain instructions. alignment
// behaviour given in "A63.2.1 Alignment behaviour" of "ARMv7-M"
func (arm *ARM) write16bit(addr uint32, val uint16, requiresAlignment bool) {
if addr < arm.mmap.NullAccessBoundary {
arm.nullAccess("Write 16bit", addr)
}
// check 16 bit alignment
if requiresAlignment && addr&0x01 != 0x00 {
logger.Logf("ARM7", "misaligned 16 bit write (%08x) (PC: %08x)", addr, arm.state.instructionPC)
if (requiresAlignment || !arm.mmap.MisalignedAccesses) && !IsAlignedTo16bits(addr) {
arm.misalignedAccess("Read 16bit", addr)
if !arm.mmap.MisalignedAccesses {
addr = AlignTo16bits(addr)
}
}
mem, origin := arm.mem.MapAddress(addr, true)
@ -223,16 +239,17 @@ func (arm *ARM) write16bit(addr uint32, val uint16, requiresAlignment bool) {
arm.byteOrder.PutUint16((*mem)[idx:], val)
}
// requiresAlignment should be true only for certain instructions. alignment
// behaviour given in "A63.2.1 Alignment behaviour" of "ARMv7-M"
func (arm *ARM) read32bit(addr uint32, requiresAlignment bool) uint32 {
if addr < arm.mmap.NullAccessBoundary {
arm.nullAccess("Read 32bit", addr)
}
// check 32 bit alignment
if requiresAlignment && addr&0x03 != 0x00 {
logger.Logf("ARM7", "misaligned 32 bit read (%08x) (PC: %08x)", addr, arm.state.instructionPC)
if (requiresAlignment || !arm.mmap.MisalignedAccesses) && !IsAlignedTo32bits(addr) {
arm.misalignedAccess("Read 32bit", addr)
if !arm.mmap.MisalignedAccesses {
addr = AlignTo32bits(addr)
}
}
mem, origin := arm.mem.MapAddress(addr, false)
@ -274,16 +291,17 @@ func (arm *ARM) read32bit(addr uint32, requiresAlignment bool) uint32 {
return arm.byteOrder.Uint32((*mem)[idx:])
}
// requiresAlignment should be true only for certain instructions. alignment
// behaviour given in "A63.2.1 Alignment behaviour" of "ARMv7-M"
func (arm *ARM) write32bit(addr uint32, val uint32, requiresAlignment bool) {
if addr < arm.mmap.NullAccessBoundary {
arm.nullAccess("Write 32bit", addr)
}
// check 32 bit alignment
if requiresAlignment && addr&0x03 != 0x00 {
logger.Logf("ARM7", "misaligned 32 bit write (%08x) (PC: %08x)", addr, arm.state.instructionPC)
if (requiresAlignment || !arm.mmap.MisalignedAccesses) && !IsAlignedTo32bits(addr) {
arm.misalignedAccess("Write 32bit", addr)
if !arm.mmap.MisalignedAccesses {
addr = AlignTo32bits(addr)
}
}
mem, origin := arm.mem.MapAddress(addr, true)
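As a rough guide to how callers are expected to use the requiresAlignment
parameter: in ARMv7-M a plain word load tolerates a misaligned address,
whereas a load-multiple always requires word alignment, so the two would call
read32bit differently. The sketch below is illustrative and not code from this
commit; loadWord and loadMultiple are hypothetical helpers.

    // hypothetical callers of read32bit(). whether requiresAlignment is
    // true depends on the instruction being emulated: in ARMv7-M a plain
    // LDR tolerates misalignment but multi-word transfers such as LDM/STM
    // must be word aligned. the ARM7TDMI case is handled inside read32bit
    // itself via mmap.MisalignedAccesses
    func (arm *ARM) loadWord(addr uint32) uint32 {
        return arm.read32bit(addr, false)
    }

    func (arm *ARM) loadMultiple(addr uint32, n int) []uint32 {
        vals := make([]uint32, n)
        for i := range vals {
            vals[i] = arm.read32bit(addr, true)
            addr += 4
        }
        return vals
    }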