Implemented basic tracing support.

When built with the "trace" feature, the emulator now supports tracing
internal variables for debugging purposes.
This commit is contained in:
Lionel Flandrin 2016-09-01 22:53:57 +02:00 committed by Lionel Flandrin
parent 902bc1cfbf
commit 9848d673e3
16 changed files with 351 additions and 90 deletions

View file

@ -6,4 +6,5 @@ before_script:
- cargo -V
script:
- cargo build -v
- cargo build -v --features trace
- cargo test

View file

@ -13,12 +13,16 @@ readme = "README.md"
license = "GPL-2.0+"
keywords = ["emulator", "playstation"]
[features]
trace = [ "lazy_static" ]
[dependencies]
shaman = "0.1.0"
log = "0.3.4"
arrayvec = "0.3.15"
rustc-serialize = "0.3.19"
lazy_static = { version = "0.2.1", optional = true }
[lib]
name = "rustation"

View file

@ -653,7 +653,7 @@ impl CdRom {
if self.irq() {
// Interrupt rising edge
shared.irq_state().assert(Interrupt::CdRom);
shared.irq_state_mut().assert(Interrupt::CdRom);
}
}

View file

@ -12,6 +12,7 @@ use shared::SharedState;
use gpu::renderer::Renderer;
use interrupt::InterruptState;
use debugger::Debugger;
use tracer::module_tracer;
use self::cop0::{Cop0, Exception};
use self::gte::Gte;
@ -57,7 +58,6 @@ pub struct Cpu {
}
impl Cpu {
/// Create a new CPU instance
pub fn new(inter: Interconnect) -> Cpu {
// Not sure what the reset values are...
@ -159,6 +159,17 @@ impl Cpu {
if self.cop0.irq_active(*shared.irq_state()) {
shared.counters_mut().cpu_interrupt.increment();
module_tracer("CPU", |m| {
let now = shared.tk().now();
m.trace(now,
"irq_count",
shared.counters_mut().cpu_interrupt.get());
m.trace(now,
"irq_pc",
self.current_pc);
});
if instruction.is_gte_op() {
// GTE instructions get executed even if an interrupt
// occurs
@ -167,6 +178,11 @@ impl Cpu {
shared,
renderer);
}
// XXX No idea how long the interrupt switch takes on the
// real hardware?
shared.tk().tick(1);
self.exception(Exception::Interrupt);
} else {
// No interrupt pending, run the current instruction
@ -297,7 +313,7 @@ impl Cpu {
}
/// Handle writes when the cache is isolated
pub fn cache_maintenance<T: Addressable>(&mut self, addr: u32, val: u32) {
pub fn cache_maintenance<A: Addressable>(&mut self, addr: u32, val: u32) {
// Implementing full cache emulation requires handling many
// corner cases. For now I'm just going to add support for
// cache invalidation which is the only use case for cache
@ -309,7 +325,7 @@ impl Cpu {
panic!("Cache maintenance while instruction cache is disabled");
}
if T::size() != 4 || val != 0 {
if A::size() != 4 || val != 0 {
panic!("Unsupported write while cache is isolated: {:08x}",
val);
}

View file

@ -7,7 +7,6 @@ use gpu::{Gpu, VideoClock};
use gpu::renderer::{Renderer, PrimitiveAttributes, Vertex};
use memory::{Interconnect, Addressable};
use memory;
use debugger::DummyDebugger;
use shared::SharedState;
use bios::Bios;
@ -83,7 +82,6 @@ fn test_beq() {
let inter = Interconnect::new(bios, gpu, None);
let mut cpu = Cpu::new(inter);
let mut shared = SharedState::new();
let mut debugger = DummyDebugger;
let mut renderer = DummyRenderer;
for r in 0..31 {
@ -116,7 +114,7 @@ fn test_beq() {
timeout = false;
break;
}
cpu.run_next_instruction(&mut debugger, &mut shared, &mut renderer);
cpu.run_next_instruction(&mut (), &mut shared, &mut renderer);
}
assert!(timeout == false);
@ -131,7 +129,6 @@ fn test_branch_in_branch_delay() {
let inter = Interconnect::new(bios, gpu, None);
let mut cpu = Cpu::new(inter);
let mut shared = SharedState::new();
let mut debugger = DummyDebugger;
let mut renderer = DummyRenderer;
for r in 0..31 {
@ -159,7 +156,7 @@ fn test_branch_in_branch_delay() {
timeout = false;
break;
}
cpu.run_next_instruction(&mut debugger, &mut shared, &mut renderer);
cpu.run_next_instruction(&mut (), &mut shared, &mut renderer);
}
assert!(timeout == false);
@ -175,7 +172,6 @@ fn test_lwr_and_lwr_load_delay() {
let inter = Interconnect::new(bios, gpu, None);
let mut cpu = Cpu::new(inter);
let mut shared = SharedState::new();
let mut debugger = DummyDebugger;
let mut renderer = DummyRenderer;
for r in 0..31 {
@ -241,7 +237,7 @@ fn test_lwr_and_lwr_load_delay() {
timeout = false;
break;
}
cpu.run_next_instruction(&mut debugger, &mut shared, &mut renderer);
cpu.run_next_instruction(&mut (), &mut shared, &mut renderer);
}
assert!(timeout == false);
@ -272,7 +268,6 @@ fn test_add_1() {
let inter = Interconnect::new(bios, gpu, None);
let mut cpu = Cpu::new(inter);
let mut shared = SharedState::new();
let mut debugger = DummyDebugger;
let mut renderer = DummyRenderer;
for r in 0..31 {
@ -298,7 +293,7 @@ fn test_add_1() {
timeout = false;
break;
}
cpu.run_next_instruction(&mut debugger, &mut shared, &mut renderer);
cpu.run_next_instruction(&mut (), &mut shared, &mut renderer);
}
assert!(timeout == false);
@ -317,7 +312,6 @@ fn test_arithmetic_branching_test() {
let inter = Interconnect::new(bios, gpu, None);
let mut cpu = Cpu::new(inter);
let mut shared = SharedState::new();
let mut debugger = DummyDebugger;
let mut renderer = DummyRenderer;
for r in 0..31 {
@ -344,7 +338,7 @@ fn test_arithmetic_branching_test() {
timeout = false;
break;
}
cpu.run_next_instruction(&mut debugger, &mut shared, &mut renderer);
cpu.run_next_instruction(&mut (), &mut shared, &mut renderer);
}
assert!(timeout == false);
@ -360,7 +354,6 @@ fn test_bltzal_and_bgezal() {
let inter = Interconnect::new(bios, gpu, None);
let mut cpu = Cpu::new(inter);
let mut shared = SharedState::new();
let mut debugger = DummyDebugger;
let mut renderer = DummyRenderer;
for r in 0..31 {
@ -408,7 +401,7 @@ fn test_bltzal_and_bgezal() {
timeout = false;
break;
}
cpu.run_next_instruction(&mut debugger, &mut shared, &mut renderer);
cpu.run_next_instruction(&mut (), &mut shared, &mut renderer);
}
assert!(timeout == false);
@ -429,7 +422,6 @@ fn test_unaligned_loads() {
let inter = Interconnect::new(bios, gpu, None);
let mut cpu = Cpu::new(inter);
let mut shared = SharedState::new();
let mut debugger = DummyDebugger;
let mut renderer = DummyRenderer;
for r in 0..31 {
@ -453,7 +445,7 @@ fn test_unaligned_loads() {
timeout = false;
break;
}
cpu.run_next_instruction(&mut debugger, &mut shared, &mut renderer);
cpu.run_next_instruction(&mut (), &mut shared, &mut renderer);
}
assert!(timeout == false);
@ -470,7 +462,6 @@ fn test_load_delay_for_cop() {
let inter = Interconnect::new(bios, gpu, None);
let mut cpu = Cpu::new(inter);
let mut shared = SharedState::new();
let mut debugger = DummyDebugger;
let mut renderer = DummyRenderer;
for r in 0..31 {
@ -502,7 +493,7 @@ fn test_load_delay_for_cop() {
timeout = false;
break;
}
cpu.run_next_instruction(&mut debugger, &mut shared, &mut renderer);
cpu.run_next_instruction(&mut (), &mut shared, &mut renderer);
}
assert!(timeout == false);
@ -517,7 +508,6 @@ fn test_swl_and_swr() {
let inter = Interconnect::new(bios, gpu, None);
let mut cpu = Cpu::new(inter);
let mut shared = SharedState::new();
let mut debugger = DummyDebugger;
let mut renderer = DummyRenderer;
for r in 0..31 {
@ -563,7 +553,7 @@ fn test_swl_and_swr() {
timeout = false;
break;
}
cpu.run_next_instruction(&mut debugger, &mut shared, &mut renderer);
cpu.run_next_instruction(&mut (), &mut shared, &mut renderer);
}
assert!(timeout == false);
@ -584,7 +574,6 @@ fn test_multiple_load_cancelling() {
let inter = Interconnect::new(bios, gpu, None);
let mut cpu = Cpu::new(inter);
let mut shared = SharedState::new();
let mut debugger = DummyDebugger;
let mut renderer = DummyRenderer;
for r in 0..31 {
@ -612,7 +601,7 @@ fn test_multiple_load_cancelling() {
timeout = false;
break;
}
cpu.run_next_instruction(&mut debugger, &mut shared, &mut renderer);
cpu.run_next_instruction(&mut (), &mut shared, &mut renderer);
}
assert!(timeout == false);
@ -627,7 +616,6 @@ fn test_lwl_and_lwr() {
let inter = Interconnect::new(bios, gpu, None);
let mut cpu = Cpu::new(inter);
let mut shared = SharedState::new();
let mut debugger = DummyDebugger;
let mut renderer = DummyRenderer;
for r in 0..31 {
@ -685,7 +673,7 @@ fn test_lwl_and_lwr() {
timeout = false;
break;
}
cpu.run_next_instruction(&mut debugger, &mut shared, &mut renderer);
cpu.run_next_instruction(&mut (), &mut shared, &mut renderer);
}
assert!(timeout == false);
@ -716,7 +704,6 @@ fn test_lh_and_lb_sign_extension() {
let inter = Interconnect::new(bios, gpu, None);
let mut cpu = Cpu::new(inter);
let mut shared = SharedState::new();
let mut debugger = DummyDebugger;
let mut renderer = DummyRenderer;
for r in 0..31 {
@ -742,7 +729,7 @@ fn test_lh_and_lb_sign_extension() {
timeout = false;
break;
}
cpu.run_next_instruction(&mut debugger, &mut shared, &mut renderer);
cpu.run_next_instruction(&mut (), &mut shared, &mut renderer);
}
assert!(timeout == false);

View file

@ -21,9 +21,7 @@ pub trait Debugger {
/// Dummy debugger implementation that does nothing. Can be used when
/// debugging is disabled.
pub struct DummyDebugger;
impl Debugger for DummyDebugger {
impl Debugger for () {
fn trigger_break(&mut self) {
}

View file

@ -285,7 +285,7 @@ impl Gpu {
if !self.vblank_interrupt && vblank_interrupt {
// Rising edge of the vblank interrupt
shared.irq_state().assert(Interrupt::VBlank);
shared.irq_state_mut().assert(Interrupt::VBlank);
}
if self.vblank_interrupt && !vblank_interrupt {

View file

@ -5,10 +5,16 @@ extern crate cdimage;
extern crate arrayvec;
extern crate rustc_serialize;
#[cfg(feature = "trace")]
#[macro_use]
extern crate lazy_static;
#[macro_use]
mod box_array;
#[macro_use]
mod serializer;
#[macro_use]
pub mod tracer;
pub mod gpu;
pub mod cdrom;

View file

@ -1,5 +1,6 @@
use memory::Addressable;
use shared::SharedState;
use tracer::module_tracer;
/// Motion Decoder (sometimes called macroblock or movie decoder).
#[derive(RustcDecodable, RustcEncodable)]
@ -44,12 +45,12 @@ impl MDec {
}
}
pub fn load<T: Addressable>(&mut self,
pub fn load<A: Addressable>(&mut self,
_: &mut SharedState,
offset: u32) -> u32 {
if T::size() != 4 {
panic!("Unhandled MDEC load ({})", T::size());
if A::size() != 4 {
panic!("Unhandled MDEC load ({})", A::size());
}
match offset {
@ -59,17 +60,17 @@ impl MDec {
}
pub fn store<T: Addressable>(&mut self,
_: &mut SharedState,
pub fn store<A: Addressable>(&mut self,
shared: &mut SharedState,
offset: u32,
val: u32) {
if T::size() != 4 {
panic!("Unhandled MDEC store ({})", T::size());
if A::size() != 4 {
panic!("Unhandled MDEC store ({})", A::size());
}
match offset {
0 => self.command(val),
0 => self.command(shared, val),
4 => self.set_control(val),
_ => panic!("Unhandled MDEC store: {:08x} {:08x}", offset, val),
}
@ -108,7 +109,14 @@ impl MDec {
}
/// Handle writes to the command register
pub fn command(&mut self, cmd: u32) {
pub fn command(&mut self, shared: &mut SharedState, cmd: u32) {
module_tracer("MDEC", |m| {
m.trace(shared.tk().now(),
"command_word",
cmd);
});
self.command_remaining -= 1;
(self.command_handler)(self, cmd);
@ -146,7 +154,10 @@ impl MDec {
false => (16, MDec::handle_monochrome_quant_matrix),
},
3 => (32, MDec::handle_idct_matrix),
n => panic!("Unsupported MDEC opcode {} ({:08x})", n, cmd),
n => {
warn!("Unsupported MDEC opcode {} ({:08x})", n, cmd);
(1, MDec::handle_command)
}
};
self.command_remaining = len;
@ -209,7 +220,7 @@ impl MDec {
}
}
callback!(struct CommandHandler(fn (&mut MDec, u32)) {
callback!(struct CommandHandler (fn(&mut MDec, u32)) {
MDec::handle_command,
MDec::handle_color_quant_matrices,
MDec::handle_monochrome_quant_matrix,

View file

@ -1,6 +1,8 @@
use shared::SharedState;
use interrupt::Interrupt;
use tracer::SizedValue;
/// Direct Memory Access
#[derive(RustcDecodable, RustcEncodable)]
pub struct Dma {
@ -91,7 +93,7 @@ impl Dma {
if !prev_irq && self.irq() {
// Rising edge of the done interrupt
shared.irq_state().assert(Interrupt::Dma);
shared.irq_state_mut().assert(Interrupt::Dma);
}
}
@ -120,7 +122,7 @@ impl Dma {
if !prev_irq && self.irq() {
// Rising edge of the done interrupt
shared.irq_state().assert(Interrupt::Dma);
shared.irq_state_mut().assert(Interrupt::Dma);
}
}
}
@ -323,6 +325,12 @@ pub enum Sync {
LinkedList = 2,
}
impl From<Sync> for SizedValue {
fn from(v: Sync) -> SizedValue {
SizedValue(v as u32, 2)
}
}
/// The 7 DMA ports
#[derive(Clone, Copy, PartialEq, Eq, Debug, RustcDecodable, RustcEncodable)]
pub enum Port {
@ -356,3 +364,9 @@ impl Port {
}
}
}
impl From<Port> for SizedValue {
fn from(v: Port) -> SizedValue {
SizedValue(v as u32, 3)
}
}

View file

@ -18,6 +18,7 @@ use padmemcard::PadMemCard;
use mdec::MDec;
use parallel_io::ParallelIo;
use debug_uart::DebugUart;
use tracer::module_tracer;
/// Global interconnect
#[derive(RustcDecodable, RustcEncodable)]
@ -168,7 +169,7 @@ impl Interconnect {
}
/// Interconnect: load value at `addr`
pub fn load<T: Addressable>(&mut self,
pub fn load<A: Addressable>(&mut self,
shared: &mut SharedState,
addr: u32) -> u32 {
// XXX Since I don't implement CPU pipelining correctly for
@ -180,7 +181,7 @@ impl Interconnect {
let abs_addr = map::mask_region(addr);
if let Some(offset) = map::RAM.contains(abs_addr) {
return self.ram.load::<T>(offset);
return self.ram.load::<A>(offset);
}
if let Some(offset) = map::SCRATCH_PAD.contains(abs_addr) {
@ -188,11 +189,11 @@ impl Interconnect {
panic!("ScratchPad access through uncached memory");
}
return self.scratch_pad.load::<T>(offset);
return self.scratch_pad.load::<A>(offset);
}
if let Some(offset) = map::BIOS.contains(abs_addr) {
return self.bios.load::<T>(offset);
return self.bios.load::<A>(offset);
}
if let Some(offset) = map::IRQ_CONTROL.contains(abs_addr) {
@ -205,35 +206,35 @@ impl Interconnect {
}
if let Some(offset) = map::DMA.contains(abs_addr) {
return self.dma_reg::<T>(offset);
return self.dma_reg::<A>(offset);
}
if let Some(offset) = map::GPU.contains(abs_addr) {
return self.gpu.load::<T>(shared, offset);
return self.gpu.load::<A>(shared, offset);
}
if let Some(offset) = map::TIMERS.contains(abs_addr) {
return self.timers.load::<T>(shared, offset);
return self.timers.load::<A>(shared, offset);
}
if let Some(offset) = map::CDROM.contains(abs_addr) {
return self.cdrom.load::<T>(shared, offset);
return self.cdrom.load::<A>(shared, offset);
}
if let Some(offset) = map::MDEC.contains(abs_addr) {
return self.mdec.load::<T>(shared, offset);
return self.mdec.load::<A>(shared, offset);
}
if let Some(offset) = map::SPU.contains(abs_addr) {
return self.spu.load::<T>(offset);
return self.spu.load::<A>(offset);
}
if let Some(offset) = map::PAD_MEMCARD.contains(abs_addr) {
return self.pad_memcard.load::<T>(shared, offset);
return self.pad_memcard.load::<A>(shared, offset);
}
if let Some(offset) = map::EXPANSION_1.contains(abs_addr) {
return self.parallel_io.load::<T>(shared, offset);
return self.parallel_io.load::<A>(shared, offset);
}
if let Some(_) = map::RAM_SIZE.contains(abs_addr) {
@ -242,8 +243,8 @@ impl Interconnect {
if let Some(offset) = map::MEM_CONTROL.contains(abs_addr) {
if T::size() != 4 {
panic!("Unhandled MEM_CONTROL access ({})", T::size());
if A::size() != 4 {
panic!("Unhandled MEM_CONTROL access ({})", A::size());
}
let index = (offset >> 2) as usize;
@ -252,22 +253,22 @@ impl Interconnect {
}
if let Some(_) = map::CACHE_CONTROL.contains(abs_addr) {
if T::size() != 4 {
panic!("Unhandled cache control access ({})", T::size());
if A::size() != 4 {
panic!("Unhandled cache control access ({})", A::size());
}
return self.cache_control.0;
}
if let Some(offset) = map::EXPANSION_2.contains(abs_addr) {
return self.debug_uart.load::<T>(shared, offset);
return self.debug_uart.load::<A>(shared, offset);
}
panic!("unhandled load at address {:08x}", addr);
}
/// Interconnect: store `val` into `addr`
pub fn store<T: Addressable>(&mut self,
pub fn store<A: Addressable>(&mut self,
shared: &mut SharedState,
renderer: &mut Renderer,
addr: u32,
@ -276,7 +277,7 @@ impl Interconnect {
let abs_addr = map::mask_region(addr);
if let Some(offset) = map::RAM.contains(abs_addr) {
self.ram.store::<T>(offset, val);
self.ram.store::<A>(offset, val);
return;
}
@ -285,25 +286,25 @@ impl Interconnect {
panic!("ScratchPad access through uncached memory");
}
return self.scratch_pad.store::<T>(offset, val);
return self.scratch_pad.store::<A>(offset, val);
}
if let Some(offset) = map::IRQ_CONTROL.contains(abs_addr) {
match offset {
0 => shared.irq_state().ack(val as u16),
4 => shared.irq_state().set_mask(val as u16),
0 => shared.irq_state_mut().ack(val as u16),
4 => shared.irq_state_mut().set_mask(val as u16),
_ => panic!("Unhandled IRQ store at address {:08x}"),
}
return;
}
if let Some(offset) = map::DMA.contains(abs_addr) {
self.set_dma_reg::<T>(shared, renderer, offset, val);
self.set_dma_reg::<A>(shared, renderer, offset, val);
return;
}
if let Some(offset) = map::GPU.contains(abs_addr) {
self.gpu.store::<T>(shared,
self.gpu.store::<A>(shared,
renderer,
&mut self.timers,
offset,
@ -312,7 +313,7 @@ impl Interconnect {
}
if let Some(offset) = map::TIMERS.contains(abs_addr) {
self.timers.store::<T>(shared,
self.timers.store::<A>(shared,
&mut self.gpu,
offset,
val);
@ -320,25 +321,25 @@ impl Interconnect {
}
if let Some(offset) = map::CDROM.contains(abs_addr) {
return self.cdrom.store::<T>(shared, offset, val);
return self.cdrom.store::<A>(shared, offset, val);
}
if let Some(offset) = map::MDEC.contains(abs_addr) {
return self.mdec.store::<T>(shared, offset, val);
return self.mdec.store::<A>(shared, offset, val);
}
if let Some(offset) = map::SPU.contains(abs_addr) {
self.spu.store::<T>(offset, val);
self.spu.store::<A>(offset, val);
return;
}
if let Some(offset) = map::PAD_MEMCARD.contains(abs_addr) {
self.pad_memcard.store::<T>(shared, offset, val);
self.pad_memcard.store::<A>(shared, offset, val);
return;
}
if let Some(_) = map::CACHE_CONTROL.contains(abs_addr) {
if T::size() != 4 {
if A::size() != 4 {
panic!("Unhandled cache control access");
}
@ -349,8 +350,8 @@ impl Interconnect {
if let Some(offset) = map::MEM_CONTROL.contains(abs_addr) {
if T::size() != 4 {
panic!("Unhandled MEM_CONTROL access ({})", T::size());
if A::size() != 4 {
panic!("Unhandled MEM_CONTROL access ({})", A::size());
}
let val = val;
@ -379,7 +380,7 @@ impl Interconnect {
if let Some(_) = map::RAM_SIZE.contains(abs_addr) {
if T::size() != 4 {
if A::size() != 4 {
panic!("Unhandled RAM_SIZE access");
}
@ -388,7 +389,7 @@ impl Interconnect {
}
if let Some(offset) = map::EXPANSION_2.contains(abs_addr) {
self.debug_uart.store::<T>(shared, offset, val);
self.debug_uart.store::<A>(shared, offset, val);
return;
}
@ -397,7 +398,7 @@ impl Interconnect {
}
/// DMA register read
fn dma_reg<T: Addressable>(&self, offset: u32) -> u32 {
fn dma_reg<A: Addressable>(&self, offset: u32) -> u32 {
// The DMA uses 32bit registers
let align = offset & 3;
@ -433,7 +434,7 @@ impl Interconnect {
}
/// DMA register write
fn set_dma_reg<T: Addressable>(&mut self,
fn set_dma_reg<A: Addressable>(&mut self,
shared: &mut SharedState,
renderer: &mut Renderer,
offset: u32,
@ -497,9 +498,29 @@ impl Interconnect {
// process everything in one pass (i.e. no
// chopping or priority handling)
match self.dma.channel(port).sync() {
let sync = self.dma.channel(port).sync();
module_tracer("DMA", |m| {
let now = shared.tk().now();
let channel = self.dma.channel_mut(port);
m.trace(now, "sync", sync);
m.trace(now, "port", port);
m.trace(now, "base", channel.base());
m.trace(now, "to_ram",
channel.direction() == Direction::ToRam);
let size =
match channel.transfer_size() {
Some(v) => v,
None => 0xffffffff,
};
m.trace(now, "size", size);
});
match sync {
Sync::LinkedList => self.do_dma_linked_list(renderer, port),
_ => self.do_dma_block(renderer, port),
_ => self.do_dma_block(shared, renderer, port),
}
self.dma.done(shared, port);
@ -555,7 +576,10 @@ impl Interconnect {
/// Emulate DMA transfer for Manual and Request synchronization
/// modes.
fn do_dma_block(&mut self, renderer: &mut Renderer, port: Port) {
fn do_dma_block(&mut self,
shared: &mut SharedState,
renderer: &mut Renderer,
port: Port) {
let channel = self.dma.channel_mut(port);
let increment = match channel.step() {
@ -587,7 +611,7 @@ impl Interconnect {
match port {
Port::Gpu => self.gpu.gp0(renderer, src_word),
Port::MDecIn => self.mdec.command(src_word),
Port::MDecIn => self.mdec.command(shared, src_word),
// XXX ignre transfers to the SPU for now
Port::Spu => (),
_ => panic!("Unhandled DMA destination port {:?}",
@ -610,6 +634,7 @@ impl Interconnect {
0
}
Port::CdRom => self.cdrom.dma_read_word(),
Port::MDecOut => 0,
_ => panic!("Unhandled DMA source port {:?}", port),
};
@ -619,6 +644,8 @@ impl Interconnect {
addr = addr.wrapping_add(increment);
remsz -= 1;
// XXX Probably completely inaccurate
shared.tk().tick(1);
}
}
}

View file

@ -289,7 +289,7 @@ impl Timer {
panic!("Unhandled negate IRQ!");
} else {
// Pulse interrupt
shared.irq_state().assert(interrupt);
shared.irq_state_mut().assert(interrupt);
self.interrupt = true;
}
} else if !self.negate_irq {

View file

@ -4,6 +4,7 @@ use memory::Addressable;
use interrupt::Interrupt;
use timekeeper::{Peripheral, Cycles};
use shared::SharedState;
use tracer::module_tracer;
use self::gamepad::GamePad;
@ -84,6 +85,15 @@ impl PadMemCard {
shared: &mut SharedState,
offset: u32,
val: u32) {
module_tracer("PAD_MEMCARD", |m| {
let now = shared.tk().now();
m.trace(now, "w_offset", offset as u8);
m.trace(now, "w_size", T::size());
m.trace(now, "w_value", val);
});
self.sync(shared);
match offset {
@ -113,6 +123,13 @@ impl PadMemCard {
shared: &mut SharedState,
offset: u32) -> u32 {
module_tracer("PAD_MEMCARD", |m| {
let now = shared.tk().now();
m.trace(now, "r_offset", offset as u8);
m.trace(now, "r_size", T::size());
});
self.sync(shared);
match offset {
@ -132,6 +149,7 @@ impl PadMemCard {
4 => {
self.stat()
}
8 => self.mode as u32,
10 => self.control() as u32,
14 => self.baud_div as u32,
_ => panic!("Unhandled gamepad read {:?} 0x{:x}",
@ -175,7 +193,7 @@ impl PadMemCard {
if self.dsr_it {
if !self.interrupt {
// Rising edge of the interrupt
let irq_state = shared.irq_state();
let irq_state = shared.irq_state_mut();
irq_state.assert(Interrupt::PadMemCard);
}
@ -333,7 +351,7 @@ impl PadMemCard {
warn!("Gamepad interrupt acknowledge while DSR is active");
self.interrupt = true;
shared.irq_state().assert(Interrupt::PadMemCard);
shared.irq_state_mut().assert(Interrupt::PadMemCard);
}
}

View file

@ -116,6 +116,9 @@ impl ExeLoader {
loader: Vec::new(),
};
info!("Loaded PS-EXE: BASE=0x{:08x} ENTRY=0x{:08x} LEN={}",
base, entry, text_len);
loader.assemble_loader();
Ok(loader)

View file

@ -22,7 +22,11 @@ impl SharedState {
&mut self.tk
}
pub fn irq_state(&mut self) -> &mut InterruptState {
pub fn irq_state(&mut self) -> &InterruptState {
&self.irq_state
}
pub fn irq_state_mut(&mut self) -> &mut InterruptState {
&mut self.irq_state
}

172
src/tracer.rs Normal file
View file

@ -0,0 +1,172 @@
//! Interface used to log internal variables in order to generate
//! traces
use std::collections::HashMap;
/// Backing storage for every logged value. Since we use a `u32` we
/// only support variables up to 32 bits wide for now.
pub type ValueType = u32;
/// Width of a logged value, in bits.
pub type ValueSize = u8;

/// A logged value paired with its width: `SizedValue(value, bits)`.
#[derive(Copy, Clone)]
pub struct SizedValue(pub ValueType, pub ValueSize);

/// Generate a `From<$t>` conversion into `SizedValue` for each
/// primitive type, tagging the value with its bit width.
macro_rules! sized_value_from {
    ($($t:ty => $bits:expr),+) => {
        $(
            impl From<$t> for SizedValue {
                fn from(v: $t) -> SizedValue {
                    SizedValue(v as ValueType, $bits)
                }
            }
        )+
    }
}

sized_value_from!(bool => 1,
                  u8   => 8,
                  u16  => 16,
                  u32  => 32);
/// A single traced variable: a fixed bit width plus the ordered
/// history of samples recorded for it.
pub struct Variable {
    /// Width of this variable, in bits.
    size: ValueSize,
    /// Chronological samples for this variable: `(date, value)`
    log: Vec<(u64, ValueType)>,
}

impl Variable {
    fn new(size: ValueSize) -> Variable {
        Variable {
            size: size,
            log: vec![],
        }
    }

    /// Return the chronological `(date, value)` samples recorded so
    /// far.
    pub fn log(&self) -> &Vec<(u64, ValueType)> {
        &self.log
    }

    /// Return this variable's width in bits.
    pub fn size(&self) -> ValueSize {
        self.size
    }
}
/// A group of related traced variables (one `Module` per emulated
/// subsystem, e.g. "CPU" or "DMA").
pub struct Module {
    /// Variables, indexed by name.
    variables: HashMap<&'static str, Variable>,
}

impl Module {
    /// Create an empty module. Only needed when tracing is enabled.
    #[cfg(feature = "trace")]
    fn new() -> Module {
        Module {
            variables: HashMap::new(),
        }
    }

    /// Return all the variables recorded so far, indexed by name.
    pub fn variables(&self) -> &HashMap<&'static str, Variable> {
        &self.variables
    }

    /// Record `sized_value` for the variable `name` at `date`.
    /// Consecutive samples with the same value are coalesced: only
    /// changes are stored.
    ///
    /// # Panics
    ///
    /// Panics if `sized_value`'s width differs from earlier samples
    /// of `name`, or if `date` is not strictly greater than the last
    /// recorded date for `name`.
    pub fn trace<V: Into<SizedValue>>(&mut self,
                                      date: u64,
                                      name: &'static str,
                                      sized_value: V) {
        let SizedValue(value, size) = sized_value.into();

        // `or_insert_with` only builds the fresh `Variable` (and its
        // backing `Vec`) on first use; the original `or_insert`
        // allocated one on every call.
        let var = self.variables
            .entry(name)
            .or_insert_with(|| Variable::new(size));

        if var.size != size {
            panic!("Incoherent size for variable {}: got {} and {}",
                   name, var.size, size);
        }

        if let Some(&(last_date, last_value)) = var.log.last() {
            if last_date >= date {
                panic!("Got out-of-order events for {} ({} >= {})",
                       name, last_date, date);
            }

            if last_value == value {
                // No value change
                return;
            }
        }

        var.log.push((date, value));
    }
}
/// Trace collector: holds every `Module` recorded since the last
/// call to `remove_trace`. Only compiled in when the "trace" feature
/// is enabled.
#[cfg(feature = "trace")]
pub struct Tracer {
    /// Modules, indexed by name
    modules: HashMap<&'static str, Module>,
}

#[cfg(feature = "trace")]
impl Tracer {
    fn new() -> Tracer {
        Tracer {
            modules: HashMap::new(),
        }
    }

    /// Return the module registered under `name`, creating it on
    /// first use.
    fn module_mut(&mut self, name: &'static str) -> &mut Module {
        // `or_insert_with` avoids building a throwaway `Module` (and
        // its `HashMap`) when the entry already exists.
        self.modules.entry(name).or_insert_with(Module::new)
    }

    /// Reset the tracer and return the content of the previous trace
    fn remove_trace(&mut self) -> HashMap<&'static str, Module> {
        // `mem::replace` swaps in an empty map and hands back the old
        // one in a single expression.
        ::std::mem::replace(&mut self.modules, HashMap::new())
    }
}
/// Global logger instance
///
/// Lazily initialized on first access and wrapped in a `Mutex` so
/// accesses through `module_tracer`/`remove_trace` are serialized.
#[cfg(feature = "trace")]
lazy_static! {
    static ref LOGGER: ::std::sync::Mutex<Tracer> = {
        ::std::sync::Mutex::new(Tracer::new())
    };
}
/// Hand back everything recorded so far and reset the global tracer.
#[cfg(feature = "trace")]
pub fn remove_trace() -> HashMap<&'static str, Module> {
    LOGGER.lock().unwrap().remove_trace()
}

/// Tracing is compiled out: there is never anything to hand back.
#[cfg(not(feature = "trace"))]
pub fn remove_trace() -> HashMap<&'static str, Module> {
    HashMap::new()
}
/// Run `f` against the tracing module registered under `name`,
/// creating the module on first use.
#[cfg(feature = "trace")]
pub fn module_tracer<F>(name: &'static str, f: F)
    where F: FnOnce(&mut Module) {

    let mut logger = LOGGER.lock().unwrap();

    f(logger.module_mut(name));
}

/// Tracing is compiled out: the closure is never invoked so the
/// whole call should be optimized away.
#[cfg(not(feature = "trace"))]
#[inline(always)]
pub fn module_tracer<F>(_name: &'static str, _f: F)
    where F: FnOnce(&mut Module) {
    // NOP
}