From a68329aec16e91b7b722dc4953fee362d2e8a8f3 Mon Sep 17 00:00:00 2001 From: Derek Hensley Date: Mon, 19 Aug 2024 21:48:28 -0600 Subject: [PATCH] Asm Formatting (#82) * Replace STAY2 with per instruction macros Co-authored-by: Tharo <17233964+Thar0@users.noreply.github.com> * asm Formatting Co-authored-by: Tharo <17233964+Thar0@users.noreply.github.com> * Remove unused STAY macros * space after comma for some asm args --------- Co-authored-by: Tharo <17233964+Thar0@users.noreply.github.com> --- include/sys/asm.h | 31 ++-- src/error/errorasm.s | 20 +-- src/libc/bcmp.s | 120 ++++++------- src/libc/bcopy.s | 326 ++++++++++++++++++------------------ src/libc/bzero.s | 70 ++++---- src/log/delay.s | 12 +- src/os/exceptasm.s | 36 ++-- src/os/getcause.s | 4 +- src/os/getcompare.s | 4 +- src/os/getconfig.s | 4 +- src/os/getcount.s | 4 +- src/os/getfpccsr.s | 4 +- src/os/getintmask.s | 39 +++-- src/os/getsr.s | 4 +- src/os/gettlbasid.s | 6 +- src/os/gettlbhi.s | 8 +- src/os/gettlblo0.s | 8 +- src/os/gettlblo1.s | 8 +- src/os/gettlbpagemask.s | 8 +- src/os/getwatchlo.s | 4 +- src/os/interrupt.s | 56 +++---- src/os/invaldcache.s | 58 +++---- src/os/invalicache.s | 40 ++--- src/os/maptlb.s | 127 +++++++------- src/os/maptlbrdb.s | 32 ++-- src/os/probetlb.s | 64 +++---- src/os/setcause.s | 4 +- src/os/setcompare.s | 4 +- src/os/setconfig.s | 4 +- src/os/setcount.s | 4 +- src/os/setfpccsr.s | 6 +- src/os/setintmask.s | 56 +++---- src/os/setsr.s | 4 +- src/os/settlbasid.s | 30 ++-- src/os/setwatchlo.s | 4 +- src/os/unmaptlb.s | 44 ++--- src/os/unmaptlball.s | 26 +-- src/os/writebackdcache.s | 40 ++--- src/os/writebackdcacheall.s | 16 +- src/rmon/rmonrcp.s | 8 +- 40 files changed, 675 insertions(+), 672 deletions(-) diff --git a/include/sys/asm.h b/include/sys/asm.h index 97f8fb28..60e6ccd2 100644 --- a/include/sys/asm.h +++ b/include/sys/asm.h @@ -83,32 +83,37 @@ extern "C" { #endif +#define NOP \ + .set noreorder ;\ + nop ;\ + .set reorder -#define STAY1(stmnt) \ +#define CACHE(op, reg) \ .set noreorder ;\ - stmnt ;\ + cache op, reg ;\ .set reorder -#define STAY2(stmnt, arg1) \ - .set noreorder ;\ - stmnt, arg1 ;\ +#define MFC0(reg, op) \ + .set noreorder ;\ + mfc0 reg, op ;\ .set reorder -#define STAY3(stmnt, arg1, arg2) \ - .set noreorder ;\ - stmnt, arg1, arg2 ;\ +#define MTC0(reg, op) \ + .set noreorder ;\ + mtc0 reg, op ;\ .set reorder -#define NOP \ - .set noreorder ;\ - nop ;\ +#define CFC1(reg, op) \ + .set noreorder ;\ + cfc1 reg, op ;\ .set reorder -#define CACHE(op, reg) \ +#define CTC1(reg, op) \ .set noreorder ;\ - cache op, reg ;\ + ctc1 reg, op ;\ .set reorder + #ifdef __cplusplus } #endif diff --git a/src/error/errorasm.s b/src/error/errorasm.s index 10f35615..c41cf6d4 100644 --- a/src/error/errorasm.s +++ b/src/error/errorasm.s @@ -7,23 +7,23 @@ LEAF(__osError) #if BUILD_VERSION < VERSION_J - lw t0,__osErrorHandler - beqz t0,1f + lw t0, __osErrorHandler + beqz t0, 1f jr t0 #elif BUILD_VERSION < VERSION_K - lw t0,__kmc_pt_mode - bnez t0,_kmc_mode + lw t0, __kmc_pt_mode + bnez t0, _kmc_mode - lw t0,__osErrorHandler - beqz t0,1f + lw t0, __osErrorHandler + beqz t0, 1f jr t0 _kmc_mode: - lw t0,__kmcErrorHandler - beqz t0,1f + lw t0, __kmcErrorHandler + beqz t0, 1f jr t0 #else - lw t0,__osCommonHandler - beqz t0,1f + lw t0, __osCommonHandler + beqz t0, 1f jr t0 #endif 1: diff --git a/src/libc/bcmp.s b/src/libc/bcmp.s index 11db46d9..def32355 100644 --- a/src/libc/bcmp.s +++ b/src/libc/bcmp.s @@ -9,88 +9,88 @@ WEAK(bcmp, _bcmp) #define _bcmp bcmp #endif LEAF(_bcmp) - xor v0, a0, a1 - blt a2, 
16, bytecmp + xor v0, a0, a1 + blt a2, 16, bytecmp - andi v0, v0, 0x3 - negu t8, a0 - bnez v0, unalgncmp + andi v0, v0, 0x3 + negu t8, a0 + bnez v0, unalgncmp - andi t8, t8, 0x3 - subu a2, a2, t8 - beqz t8, wordcmp + andi t8, t8, 0x3 + subu a2, a2, t8 + beqz t8, wordcmp - move v0, v1 - lwl v0, 0(a0) - lwl v1, 0(a1) - addu a0, a0, t8 - addu a1, a1, t8 - bne v0, v1, cmpne + move v0, v1 + lwl v0, 0(a0) + lwl v1, 0(a1) + addu a0, a0, t8 + addu a1, a1, t8 + bne v0, v1, cmpne wordcmp: - and a3, a2, ~3 - subu a2, a2, a3 - beqz a3, bytecmp + and a3, a2, ~3 + subu a2, a2, a3 + beqz a3, bytecmp - addu a3, a3, a0 + addu a3, a3, a0 1: - lw v0, 0(a0) - lw v1, 0(a1) - addiu a0, a0, 4 - addiu a1, a1, 4 - bne v0, v1, cmpne - bne a0, a3, 1b + lw v0, 0(a0) + lw v1, 0(a1) + addiu a0, a0, 4 + addiu a1, a1, 4 + bne v0, v1, cmpne + bne a0, a3, 1b - b bytecmp + b bytecmp unalgncmp: - negu a3, a1 - andi a3, a3, 0x3 - subu a2, a2, a3 - beqz a3, partaligncmp + negu a3, a1 + andi a3, a3, 0x3 + subu a2, a2, a3 + beqz a3, partaligncmp + + addu a3, a3, a0 - addu a3, a3, a0 - 1: - lbu v0, 0(a0) - lbu v1, 0(a1) - addiu a0, a0, 1 - addiu a1, a1, 1 - bne v0, v1, cmpne - bne a0, a3, 1b + lbu v0, 0(a0) + lbu v1, 0(a1) + addiu a0, a0, 1 + addiu a1, a1, 1 + bne v0, v1, cmpne + bne a0, a3, 1b partaligncmp: - and a3, a2, ~3 - subu a2, a2, a3 - beqz a3, bytecmp + and a3, a2, ~3 + subu a2, a2, a3 + beqz a3, bytecmp - addu a3, a3, a0 + addu a3, a3, a0 1: - lwl v0, 0(a0) - lwr v0, 3(a0) - lw v1, 0(a1) - addiu a0, a0, 4 - addiu a1, a1, 4 - bne v0, v1, cmpne - bne a0, a3, 1b + lwl v0, 0(a0) + lwr v0, 3(a0) + lw v1, 0(a1) + addiu a0, a0, 4 + addiu a1, a1, 4 + bne v0, v1, cmpne + bne a0, a3, 1b bytecmp: - addu a3, a2, a0 - blez a2, cmpdone + addu a3, a2, a0 + blez a2, cmpdone 1: - lbu v0, 0(a0) - lbu v1, 0(a1) - addiu a0, a0, 1 - addiu a1, a1, 1 - bne v0, v1, cmpne - bne a0, a3, 1b + lbu v0, 0(a0) + lbu v1, 0(a1) + addiu a0, a0, 1 + addiu a1, a1, 1 + bne v0, v1, cmpne + bne a0, a3, 1b cmpdone: - move v0, zero - jr ra + move v0, zero + jr ra cmpne: - li v0, 1 - jr ra + li v0, 1 + jr ra .end _bcmp diff --git a/src/libc/bcopy.s b/src/libc/bcopy.s index 216c378d..08757413 100644 --- a/src/libc/bcopy.s +++ b/src/libc/bcopy.s @@ -9,212 +9,212 @@ WEAK(bcopy, _bcopy) #define _bcopy bcopy #endif LEAF(_bcopy) - move a3, a1 - beqz a2, ret - beq a0, a1, ret - blt a1, a0, goforwards + move a3, a1 + beqz a2, ret + beq a0, a1, ret + blt a1, a0, goforwards - add v0, a0, a2 - bge a1, v0, goforwards - b gobackwards + add v0, a0, a2 + bge a1, v0, goforwards + b gobackwards goforwards: - blt a2, 16, forwards_bytecopy + blt a2, 16, forwards_bytecopy - andi v0, a0, 0x3 - andi v1, a1, 0x3 - beq v0, v1,forwalignable + andi v0, a0, 0x3 + andi v1, a1, 0x3 + beq v0, v1, forwalignable forwards_bytecopy: - beqz a2, ret - addu v1, a0, a2 + beqz a2, ret + addu v1, a0, a2 99: - lb v0, 0(a0) - addiu a0, a0, 1 - sb v0, 0(a1) - addiu a1, a1, 1 - bne a0, v1, 99b + lb v0, 0(a0) + addiu a0, a0, 1 + sb v0, 0(a1) + addiu a1, a1, 1 + bne a0, v1, 99b ret: - move v0, a3 - jr ra + move v0, a3 + jr ra forwalignable: - beqz v0, forwards - beq v0, 1, forw_copy3 - beq v0, 2, forw_copy2 + beqz v0, forwards + beq v0, 1, forw_copy3 + beq v0, 2, forw_copy2 - lb v0, 0(a0) - addiu a0, a0, 1 - sb v0, 0(a1) - addiu a1, a1, 1 - addiu a2, a2, -1 - b forwards + lb v0, 0(a0) + addiu a0, a0, 1 + sb v0, 0(a1) + addiu a1, a1, 1 + addiu a2, a2, -1 + b forwards forw_copy2: - lh v0, 0(a0) - addiu a0, a0, 2 - sh v0, 0(a1) - addiu a1, a1, 2 - addiu a2, a2, -2 - b forwards + lh v0, 0(a0) + addiu a0, a0, 
2 + sh v0, 0(a1) + addiu a1, a1, 2 + addiu a2, a2, -2 + b forwards forw_copy3: - lb v0, 0(a0) - lh v1, 1(a0) - addiu a0, a0, 3 - sb v0, 0(a1) - sh v1, 1(a1) - addiu a1, a1, 3 - addiu a2, a2, -3 + lb v0, 0(a0) + lh v1, 1(a0) + addiu a0, a0, 3 + sb v0, 0(a1) + sh v1, 1(a1) + addiu a1, a1, 3 + addiu a2, a2, -3 forwards: forwards_32: - blt a2, 32, forwards_16 - lw v0, 0(a0) - lw v1, 4(a0) - lw t0, 8(a0) - lw t1, 12(a0) - lw t2, 16(a0) - lw t3, 20(a0) - lw ta0, 24(a0) - lw ta1, 28(a0) - addiu a0, a0, 32 - sw v0, 0(a1) - sw v1, 4(a1) - sw t0, 8(a1) - sw t1, 12(a1) - sw t2, 16(a1) - sw t3, 20(a1) - sw ta0, 24(a1) - sw ta1, 28(a1) - addiu a1, a1, 32 - addiu a2, a2, -32 - b forwards_32 + blt a2, 32, forwards_16 + lw v0, 0(a0) + lw v1, 4(a0) + lw t0, 8(a0) + lw t1, 12(a0) + lw t2, 16(a0) + lw t3, 20(a0) + lw ta0, 24(a0) + lw ta1, 28(a0) + addiu a0, a0, 32 + sw v0, 0(a1) + sw v1, 4(a1) + sw t0, 8(a1) + sw t1, 12(a1) + sw t2, 16(a1) + sw t3, 20(a1) + sw ta0, 24(a1) + sw ta1, 28(a1) + addiu a1, a1, 32 + addiu a2, a2, -32 + b forwards_32 forwards_16: - blt a2, 16, forwards_4 - lw v0, 0(a0) - lw v1, 4(a0) - lw t0, 8(a0) - lw t1, 12(a0) - addiu a0, a0, 16 - sw v0, 0(a1) - sw v1, 4(a1) - sw t0, 8(a1) - sw t1, 12(a1) - addiu a1, a1, 16 - addiu a2, a2, -16 - b forwards_16 + blt a2, 16, forwards_4 + lw v0, 0(a0) + lw v1, 4(a0) + lw t0, 8(a0) + lw t1, 12(a0) + addiu a0, a0, 16 + sw v0, 0(a1) + sw v1, 4(a1) + sw t0, 8(a1) + sw t1, 12(a1) + addiu a1, a1, 16 + addiu a2, a2, -16 + b forwards_16 forwards_4: - blt a2, 4, forwards_bytecopy + blt a2, 4, forwards_bytecopy - lw v0, 0(a0) - addiu a0, a0, 4 - sw v0, 0(a1) - addiu a1, a1, 4 - addiu a2, a2, -4 - b forwards_4 + lw v0, 0(a0) + addiu a0, a0, 4 + sw v0, 0(a1) + addiu a1, a1, 4 + addiu a2, a2, -4 + b forwards_4 gobackwards: - add a0, a0,a2 - add a1, a1,a2 - blt a2, 16, backwards_bytecopy + add a0, a0, a2 + add a1, a1, a2 + blt a2, 16, backwards_bytecopy - andi v0, a0, 0x3 - andi v1, a1, 0x3 - beq v0, v1,backalignable + andi v0, a0, 0x3 + andi v1, a1, 0x3 + beq v0, v1, backalignable backwards_bytecopy: - beqz a2, ret - addiu a0, a0, -1 - addiu a1, a1, -1 - subu v1, a0,a2 + beqz a2, ret + addiu a0, a0, -1 + addiu a1, a1, -1 + subu v1, a0, a2 99: - lb v0, 0(a0) - addiu a0, a0, -1 - sb v0, 0(a1) - addiu a1, a1, -1 - bne a0, v1,99b - - move v0, a3 - jr ra + lb v0, 0(a0) + addiu a0, a0, -1 + sb v0, 0(a1) + addiu a1, a1, -1 + bne a0, v1, 99b + + move v0, a3 + jr ra backalignable: - beqz v0, backwards - beq v0, 3, back_copy3 - beq v0, 2, back_copy2 - lb v0, -1(a0) - addiu a0, a0, -1 - sb v0, -1(a1) - addiu a1, a1, -1 - addiu a2, a2, -1 + beqz v0, backwards + beq v0, 3, back_copy3 + beq v0, 2, back_copy2 + lb v0, -1(a0) + addiu a0, a0, -1 + sb v0, -1(a1) + addiu a1, a1, -1 + addiu a2, a2, -1 b backwards back_copy2: - lh v0, -2(a0) - addiu a0, a0, -2 - sh v0, -2(a1) - addiu a1, a1, -2 - addiu a2, a2, -2 - b backwards + lh v0, -2(a0) + addiu a0, a0, -2 + sh v0, -2(a1) + addiu a1, a1, -2 + addiu a2, a2, -2 + b backwards back_copy3: - lb v0, -1(a0) - lh v1, -3(a0) - addiu a0, a0, -3 - sb v0, -1(a1) - sh v1, -3(a1) - addiu a1, a1, -3 - addiu a2, a2, -3 + lb v0, -1(a0) + lh v1, -3(a0) + addiu a0, a0, -3 + sb v0, -1(a1) + sh v1, -3(a1) + addiu a1, a1, -3 + addiu a2, a2, -3 backwards: backwards_32: - blt a2, 32, backwards_16 - lw v0, -4(a0) - lw v1, -8(a0) - lw t0, -12(a0) - lw t1, -16(a0) - lw t2, -20(a0) - lw t3, -24(a0) - lw ta0, -28(a0) - lw ta1, -32(a0) - addiu a0, a0, -32 - sw v0, -4(a1) - sw v1, -8(a1) - sw t0, -12(a1) - sw t1, -16(a1) - sw t2, -20(a1) - sw t3, -24(a1) - 
sw ta0, -28(a1) - sw ta1, -32(a1) - addiu a1, a1, -32 - addiu a2, a2, -32 - b backwards_32 + blt a2, 32, backwards_16 + lw v0, -4(a0) + lw v1, -8(a0) + lw t0, -12(a0) + lw t1, -16(a0) + lw t2, -20(a0) + lw t3, -24(a0) + lw ta0, -28(a0) + lw ta1, -32(a0) + addiu a0, a0, -32 + sw v0, -4(a1) + sw v1, -8(a1) + sw t0, -12(a1) + sw t1, -16(a1) + sw t2, -20(a1) + sw t3, -24(a1) + sw ta0, -28(a1) + sw ta1, -32(a1) + addiu a1, a1, -32 + addiu a2, a2, -32 + b backwards_32 backwards_16: - blt a2, 16, backwards_4 - lw v0, -4(a0) - lw v1, -8(a0) - lw t0, -12(a0) - lw t1, -16(a0) - addiu a0, a0, -16 - sw v0, -4(a1) - sw v1, -8(a1) - sw t0, -12(a1) - sw t1, -16(a1) - addiu a1, a1, -16 - addiu a2, a2, -16 - b backwards_16 + blt a2, 16, backwards_4 + lw v0, -4(a0) + lw v1, -8(a0) + lw t0, -12(a0) + lw t1, -16(a0) + addiu a0, a0, -16 + sw v0, -4(a1) + sw v1, -8(a1) + sw t0, -12(a1) + sw t1, -16(a1) + addiu a1, a1, -16 + addiu a2, a2, -16 + b backwards_16 backwards_4: - blt a2, 4, backwards_bytecopy - lw v0, -4(a0) - addiu a0, a0, -4 - sw v0, -4(a1) - addiu a1, a1, -4 - addiu a2, a2, -4 - b backwards_4 + blt a2, 4, backwards_bytecopy + lw v0, -4(a0) + addiu a0, a0, -4 + sw v0, -4(a1) + addiu a1, a1, -4 + addiu a2, a2, -4 + b backwards_4 .end _bcopy diff --git a/src/libc/bzero.s b/src/libc/bzero.s index b34c4a30..c3deec0a 100644 --- a/src/libc/bzero.s +++ b/src/libc/bzero.s @@ -12,61 +12,61 @@ WEAK(blkclr, _blkclr) #endif LEAF(_bzero) XLEAF(_blkclr) - negu v1, a0 - blt a1, 12, bytezero + negu v1, a0 + blt a1, 12, bytezero - andi v1, v1, 0x3 - subu a1, a1, v1 + andi v1, v1, 0x3 + subu a1, a1, v1 - beqz v1, blkzero - swl zero, 0(a0) - addu a0, a0, v1 + beqz v1, blkzero + swl zero, 0(a0) + addu a0, a0, v1 blkzero: /* align backwards to 0x20 */ - and a3, a1, ~31 - subu a1, a1, a3 + and a3, a1, ~31 + subu a1, a1, a3 /* If the result is zero, the amount to zero is less than 0x20 bytes */ - beqz a3, wordzero + beqz a3, wordzero /* zero in blocks of 0x20 at a time */ - addu a3, a3, a0 + addu a3, a3, a0 1: - sw zero, 0(a0) - sw zero, 4(a0) - sw zero, 8(a0) - sw zero, 12(a0) - addiu a0, a0, 32 - sw zero, -16(a0) - sw zero, -12(a0) - sw zero, -8(a0) - sw zero, -4(a0) - bne a0, a3, 1b + sw zero, 0(a0) + sw zero, 4(a0) + sw zero, 8(a0) + sw zero, 12(a0) + addiu a0, a0, 32 + sw zero, -16(a0) + sw zero, -12(a0) + sw zero, -8(a0) + sw zero, -4(a0) + bne a0, a3, 1b wordzero: /* align backwards to 0x4 */ - and a3, a1, ~3 - subu a1, a1, a3 + and a3, a1, ~3 + subu a1, a1, a3 /* If the result is zero, the amount to zero is less than 0x4 bytes */ - beqz a3, bytezero + beqz a3, bytezero /* zero one word at a time */ - addu a3, a3, a0 + addu a3, a3, a0 1: - addiu a0, a0, 4 - sw zero, -4(a0) - bne a0, a3, 1b + addiu a0, a0, 4 + sw zero, -4(a0) + bne a0, a3, 1b bytezero: /* test if nothing left to zero */ - blez a1, zerodone - #nop + blez a1, zerodone + /* zero one byte at a time */ - addu a1, a1, a0 + addu a1, a1, a0 1: - addiu a0, a0, 1 - sb zero, -1(a0) - bne a0, a1, 1b + addiu a0, a0, 1 + sb zero, -1(a0) + bne a0, a1, 1b zerodone: - jr ra + jr ra .end _bzero diff --git a/src/log/delay.s b/src/log/delay.s index 631781a9..70782f68 100644 --- a/src/log/delay.s +++ b/src/log/delay.s @@ -5,16 +5,16 @@ #include "sys/regdef.h" LEAF(osDelay) - sll t0,a0,2 - addu t0,a0 - sll t0,t0,2 - beqz a0,2f + sll t0, a0, 2 + addu t0, a0 + sll t0, t0, 2 + beqz a0, 2f 1: - subu t0,1 + subu t0, 1 NOP NOP - bnez t0,1b + bnez t0, 1b 2: j ra diff --git a/src/os/exceptasm.s b/src/os/exceptasm.s index 59e57d54..f1e65874 100644 --- a/src/os/exceptasm.s +++ 
b/src/os/exceptasm.s @@ -187,11 +187,11 @@ LEAF(__osException) sd $1, THREAD_GP1(k0); .set at /* Save SR */ -STAY2(mfc0 k1, C0_SR) + MFC0( k1, C0_SR) sw k1, THREAD_SR(k0) /* Disable interrupts */ and k1, k1, ~(SR_IE | SR_EXL) -STAY2(mtc0 k1, C0_SR) + MTC0( k1, C0_SR) /* Save some temp registers for use in the following */ sd $8, THREAD_GP8(k0) sd $9, THREAD_GP9(k0) @@ -199,7 +199,7 @@ STAY2(mtc0 k1, C0_SR) /* Mark FPU as unused */ sw zero, THREAD_FP(k0) /* This instruction is useless, leftover because of bad placement of an ifdef for the debug version */ -STAY2(mfc0 t0, C0_CAUSE) + MFC0( t0, C0_CAUSE) #ifndef _FINALROM lw t2, __kmc_pt_mode bnez t2, skip_kmc_mode @@ -213,7 +213,7 @@ STAY2(mfc0 t0, C0_CAUSE) la t1, RDB_WRITE_INTR_REG sw zero, (t1) IP7check: -STAY2(mfc0 t0, C0_CAUSE) + MFC0( t0, C0_CAUSE) andi t0, t0, CAUSE_IP7 bne zero, t0, IP7check la t2, RDB_BASE_REG @@ -370,7 +370,7 @@ notIP7: 5: sw t1, __osRdb_IP6_CurSend checkIP6: -STAY2(mfc0 t0, C0_CAUSE) + MFC0( t0, C0_CAUSE) andi t0, t0, CAUSE_IP6 bne zero, t0, checkIP6 la t0, RDB_BASE_REG @@ -383,7 +383,7 @@ rdbout: ld $1, THREAD_GP1(k0) .set at lw k1, THREAD_SR(k0) -STAY2(mtc0 k1, C0_SR) + MTC0( k1, C0_SR) .set noreorder nop nop @@ -487,12 +487,12 @@ savercp: or t1, t1, t0 endrcp: sw t1, THREAD_RCP(k0) -STAY2(mfc0 t0, C0_EPC) + MFC0( t0, C0_EPC) sw t0, THREAD_PC(k0) lw t0, THREAD_FP(k0) beqz t0, 1f /* Save FP Registers if FPU was used by the thread */ -STAY2(cfc1 t0, fcr31) + CFC1( t0, fcr31) NOP sw t0, THREAD_FPCSR(k0) sdc1 $f0, THREAD_FP0(k0) @@ -516,7 +516,7 @@ STAY2(cfc1 t0, fcr31) * Determine the cause of the exception or interrupt and * enter appropriate handling routine */ -STAY2(mfc0 t0, C0_CAUSE) + MFC0( t0, C0_CAUSE) sw t0, THREAD_CAUSE(k0) .set noreorder @@ -595,8 +595,8 @@ IP7_Hdlr: * cop0 compare register, this interrupt is triggered */ counter: -STAY2(mfc0 t1, C0_COMPARE) -STAY2(mtc0 t1, C0_COMPARE) + MFC0( t1, C0_COMPARE) + MTC0( t1, C0_COMPARE) li a0, MESG(OS_EVENT_COUNTER) /* Post counter message */ jal send_mesg @@ -878,7 +878,7 @@ firstnmi: sw2: /* Mask out interrupt */ and t0, t0, ~CAUSE_SW2 -STAY2(mtc0 t0, C0_CAUSE) + MTC0( t0, C0_CAUSE) /* Post sw2 event message */ li a0, MESG(OS_EVENT_SW2) @@ -891,7 +891,7 @@ STAY2(mtc0 t0, C0_CAUSE) sw1: /* Mask out interrupt */ and t0, t0, ~CAUSE_SW1 -STAY2(mtc0 t0, C0_CAUSE) + MTC0( t0, C0_CAUSE) /* Post sw1 event message */ li a0, MESG(OS_EVENT_SW1) @@ -956,7 +956,7 @@ panic: sh t1, THREAD_FLAGS(k0) /* Save C0_BADVADDR */ -STAY2(mfc0 t2, C0_BADVADDR) + MFC0( t2, C0_BADVADDR) sw t2, THREAD_BADVADDR(k0) @@ -1058,7 +1058,7 @@ LEAF(__osEnqueueAndYield) sw a1, __osPreviousThread #endif /* Save SR */ -STAY2(mfc0 t0, C0_SR) + MFC0( t0, C0_SR) ori t0, t0, SR_EXL sw t0, THREAD_SR(a1) @@ -1229,7 +1229,7 @@ __osDispatchThreadSave: and t1, t1, t0 and k1, k1, ~SR_IMASK or k1, k1, t1 -STAY2(mtc0 k1, C0_SR) + MTC0( k1, C0_SR) /* Restore GPRs */ .set noat ld $1, THREAD_GP1(k0) @@ -1268,14 +1268,14 @@ STAY2(mtc0 k1, C0_SR) mthi k1 /* Move thread pc to EPC so that eret will return execution to where the thread left off */ lw k1, THREAD_PC(k0) -STAY2(mtc0 k1, C0_EPC) + MTC0( k1, C0_EPC) /* Check if the FPU was used by this thread and if so also restore the FPU registers */ lw k1, THREAD_FP(k0) beqz k1, 1f lw k1, THREAD_FPCSR(k0) -STAY2(ctc1 k1, fcr31) + CTC1( k1, fcr31) ldc1 $f0, THREAD_FP0(k0) ldc1 $f2, THREAD_FP2(k0) ldc1 $f4, THREAD_FP4(k0) diff --git a/src/os/getcause.s b/src/os/getcause.s index 873464f0..2c8c503a 100644 --- a/src/os/getcause.s +++ b/src/os/getcause.s @@ -4,6 +4,6 @@ 
.text LEAF(__osGetCause) - STAY2(mfc0 v0, C0_CAUSE) - jr ra + MFC0( v0, C0_CAUSE) + jr ra END(__osGetCause) diff --git a/src/os/getcompare.s b/src/os/getcompare.s index 99c17584..897c793b 100644 --- a/src/os/getcompare.s +++ b/src/os/getcompare.s @@ -4,6 +4,6 @@ .text LEAF(__osGetCompare) - STAY2(mfc0 v0, C0_COMPARE) - jr ra + MFC0( v0, C0_COMPARE) + jr ra END(__osGetCompare) diff --git a/src/os/getconfig.s b/src/os/getconfig.s index 81651785..9027a267 100644 --- a/src/os/getconfig.s +++ b/src/os/getconfig.s @@ -4,6 +4,6 @@ .text LEAF(__osGetConfig) - STAY2(mfc0 v0, C0_CONFIG) - jr ra + MFC0( v0, C0_CONFIG) + jr ra END(__osGetConfig) diff --git a/src/os/getcount.s b/src/os/getcount.s index 28d5a79f..9ce72c65 100644 --- a/src/os/getcount.s +++ b/src/os/getcount.s @@ -4,6 +4,6 @@ .text LEAF(osGetCount) - STAY2(mfc0 v0, C0_COUNT) - jr ra + MFC0( v0, C0_COUNT) + jr ra END(osGetCount) diff --git a/src/os/getfpccsr.s b/src/os/getfpccsr.s index 56837a5e..159bc4aa 100644 --- a/src/os/getfpccsr.s +++ b/src/os/getfpccsr.s @@ -4,6 +4,6 @@ .text LEAF(__osGetFpcCsr) - STAY2(cfc1 v0, fcr31) - jr ra + CFC1( v0, fcr31) + jr ra END(__osGetSR) # @bug: Should be __osGetFpcCsr diff --git a/src/os/getintmask.s b/src/os/getintmask.s index 573eed1e..293a57ca 100644 --- a/src/os/getintmask.s +++ b/src/os/getintmask.s @@ -7,27 +7,26 @@ .text .set noreorder LEAF(osGetIntMask) - mfc0 v0, C0_SR - andi v0, v0, OS_IM_CPU - la t0, __OSGlobalIntMask - lw t1, 0(t0) - xor t0, t1, -1 - andi t0, t0, SR_IMASK - or v0, v0, t0 + mfc0 v0, C0_SR + andi v0, v0, OS_IM_CPU + la t0, __OSGlobalIntMask + lw t1, 0(t0) + xor t0, t1, -1 + andi t0, t0, SR_IMASK + or v0, v0, t0 - lw t1, PHYS_TO_K1(MI_INTR_MASK_REG) - beqz t1, 1f + lw t1, PHYS_TO_K1(MI_INTR_MASK_REG) + beqz t1, 1f + la t0, __OSGlobalIntMask # this is intentionally a macro in the branch delay slot - la t0, __OSGlobalIntMask # this is intentionally a macro in the branch delay slot - - lw t0, 0(t0) - srl t0, t0, 0x10 - xor t0, t0, -1 - andi t0, t0, 0x3f - or t1, t1, t0 + lw t0, 0(t0) + srl t0, t0, 0x10 + xor t0, t0, -1 + andi t0, t0, 0x3f + or t1, t1, t0 1: - sll t2, t1, 0x10 - or v0, v0, t2 - jr ra - nop + sll t2, t1, 0x10 + or v0, v0, t2 + jr ra + nop END(osGetIntMask) diff --git a/src/os/getsr.s b/src/os/getsr.s index bc9c767c..2db98999 100644 --- a/src/os/getsr.s +++ b/src/os/getsr.s @@ -4,6 +4,6 @@ .text LEAF(__osGetSR) - STAY2(mfc0 v0, C0_SR) - jr ra + MFC0( v0, C0_SR) + jr ra END(__osGetSR) diff --git a/src/os/gettlbasid.s b/src/os/gettlbasid.s index a0702ea6..a831c0de 100644 --- a/src/os/gettlbasid.s +++ b/src/os/gettlbasid.s @@ -5,7 +5,7 @@ .set noreorder .text LEAF(__osGetTLBASID) - mfc0 v0, C0_ENTRYHI - jr ra - nop + mfc0 v0, C0_ENTRYHI + jr ra + nop END(__osGetTLBASID) diff --git a/src/os/gettlbhi.s b/src/os/gettlbhi.s index 04c03e90..1d5b1d53 100644 --- a/src/os/gettlbhi.s +++ b/src/os/gettlbhi.s @@ -5,13 +5,13 @@ .set noreorder .text LEAF(__osGetTLBHi) - mtc0 a0, C0_INX + mtc0 a0, C0_INX nop tlbr nop nop nop - mfc0 v0, C0_ENTRYHI - jr ra - nop + mfc0 v0, C0_ENTRYHI + jr ra + nop END(__osGetTLBHi) diff --git a/src/os/gettlblo0.s b/src/os/gettlblo0.s index 3b2c2ec8..2833a4e1 100644 --- a/src/os/gettlblo0.s +++ b/src/os/gettlblo0.s @@ -5,13 +5,13 @@ .set noreorder .text LEAF(__osGetTLBLo0) - mtc0 a0, C0_INX + mtc0 a0, C0_INX nop tlbr nop nop nop - mfc0 v0, C0_ENTRYLO0 - jr ra - nop + mfc0 v0, C0_ENTRYLO0 + jr ra + nop END(__osGetTLBLo0) diff --git a/src/os/gettlblo1.s b/src/os/gettlblo1.s index a24baef7..31fdd760 100644 --- a/src/os/gettlblo1.s +++ 
b/src/os/gettlblo1.s @@ -5,13 +5,13 @@ .set noreorder .text LEAF(__osGetTLBLo1) - mtc0 a0, C0_INX + mtc0 a0, C0_INX nop tlbr nop nop nop - mfc0 v0, C0_ENTRYLO1 - jr ra - nop + mfc0 v0, C0_ENTRYLO1 + jr ra + nop END(__osGetTLBLo1) diff --git a/src/os/gettlbpagemask.s b/src/os/gettlbpagemask.s index de9954a7..77611770 100644 --- a/src/os/gettlbpagemask.s +++ b/src/os/gettlbpagemask.s @@ -5,13 +5,13 @@ .set noreorder .text LEAF(__osGetTLBPageMask) - mtc0 a0, C0_INX + mtc0 a0, C0_INX nop tlbr nop nop nop - mfc0 v0, C0_PAGEMASK - jr ra - nop + mfc0 v0, C0_PAGEMASK + jr ra + nop END(__osGetTLBPageMask) diff --git a/src/os/getwatchlo.s b/src/os/getwatchlo.s index 4eee9ee0..e1460d2c 100644 --- a/src/os/getwatchlo.s +++ b/src/os/getwatchlo.s @@ -4,6 +4,6 @@ .text LEAF(__osGetWatchLo) - STAY2(mfc0 v0, C0_WATCHLO) - jr ra + MFC0( v0, C0_WATCHLO) + jr ra END(__osGetWatchLo) diff --git a/src/os/interrupt.s b/src/os/interrupt.s index 02c885f1..0a46ffc8 100644 --- a/src/os/interrupt.s +++ b/src/os/interrupt.s @@ -8,46 +8,46 @@ .set noreorder LEAF(__osDisableInt) #if BUILD_VERSION >= VERSION_J - la t2, __OSGlobalIntMask - lw t3, (t2) - andi t3, SR_IMASK - mfc0 t0, C0_SR - and t1, t0, ~SR_IE - mtc0 t1, C0_SR - andi v0, t0, SR_IE - lw t0, (t2) - andi t0, SR_IMASK - beq t0, t3, No_Change_Global_Int - la t2, __osRunningThread # this is intentionally a macro in the branch delay slot - lw t1, THREAD_SR(t2) - andi t2, t1, SR_IMASK - and t2, t0 - and t1, ~SR_IMASK - or t1, t2 - and t1, ~SR_IE - mtc0 t1, C0_SR + la t2, __OSGlobalIntMask + lw t3, (t2) + andi t3, SR_IMASK + mfc0 t0, C0_SR + and t1, t0, ~SR_IE + mtc0 t1, C0_SR + andi v0, t0, SR_IE + lw t0, (t2) + andi t0, SR_IMASK + beq t0, t3, No_Change_Global_Int + la t2, __osRunningThread # this is intentionally a macro in the branch delay slot + lw t1, THREAD_SR(t2) + andi t2, t1, SR_IMASK + and t2, t0 + and t1, ~SR_IMASK + or t1, t2 + and t1, ~SR_IE + mtc0 t1, C0_SR nop nop No_Change_Global_Int: - jr ra + jr ra nop #else - mfc0 t0, C0_SR - and t1, t0, ~SR_IE - mtc0 t1, C0_SR - andi v0, t0, SR_IE + mfc0 t0, C0_SR + and t1, t0, ~SR_IE + mtc0 t1, C0_SR + andi v0, t0, SR_IE nop - jr ra + jr ra nop #endif END(__osDisableInt) LEAF(__osRestoreInt) - mfc0 t0, C0_SR - or t0, t0, a0 - mtc0 t0, C0_SR + mfc0 t0, C0_SR + or t0, t0, a0 + mtc0 t0, C0_SR nop nop - jr ra + jr ra nop END(__osRestoreInt) diff --git a/src/os/invaldcache.s b/src/os/invaldcache.s index 51170107..cabad5b4 100644 --- a/src/os/invaldcache.s +++ b/src/os/invaldcache.s @@ -20,72 +20,72 @@ */ LEAF(osInvalDCache) /* If the amount to invalidate is less than or equal to 0, return immediately */ - blez a1, 3f + blez a1, 3f /* * If the amount to invalidate is as large as or larger than * the data cache size, invalidate all */ - li t3, DCACHE_SIZE - bgeu a1, t3, 4f + li t3, DCACHE_SIZE + bgeu a1, t3, 4f /* * Ensure end address does not wrap around and end up smaller * than the start address */ - move t0, a0 - addu t1, a0, a1 - bgeu t0, t1, 3f + move t0, a0 + addu t1, a0, a1 + bgeu t0, t1, 3f /* Mask start with cache line */ - addiu t1, t1, -DCACHE_LINESIZE - andi t2, t0, DCACHE_LINEMASK + addiu t1, t1, -DCACHE_LINESIZE + andi t2, t0, DCACHE_LINEMASK /* If mask is not zero, the start is not cache aligned */ - beqz t2, 1f + beqz t2, 1f /* Subtract mask result to align to cache line */ - subu t0, t0, t2 + subu t0, t0, t2 /* Hit-Writeback-Invalidate unaligned part */ - CACHE((C_HWBINV|CACH_PD), (t0)) + CACHE( (C_HWBINV | CACH_PD), (t0)) /* If that is all there is to do, return early */ - bgeu t0, t1, 3f + bgeu t0, 
t1, 3f - addiu t0, t0, DCACHE_LINESIZE + addiu t0, t0, DCACHE_LINESIZE 1: /* Mask end with cache line */ - andi t2, t1, DCACHE_LINEMASK + andi t2, t1, DCACHE_LINEMASK /* If mask is not zero, the end is not cache aligned */ - beqz t2, 2f + beqz t2, 2f /* Subtract mask result to align to cache line */ - subu t1, t1, t2 + subu t1, t1, t2 /* Hit-Writeback-Invalidate unaligned part */ - CACHE((C_HWBINV|CACH_PD), DCACHE_LINESIZE(t1)) - bltu t1, t0, 3f + CACHE( (C_HWBINV | CACH_PD), DCACHE_LINESIZE(t1)) + bltu t1, t0, 3f 2: /* Hit-Invalidate */ - CACHE((C_HINV|CACH_PD), (t0)) + CACHE( (C_HINV | CACH_PD), (t0)) .set noreorder - bltu t0, t1, 2b - addiu t0, t0, DCACHE_LINESIZE + bltu t0, t1, 2b + addiu t0, t0, DCACHE_LINESIZE .set reorder 3: - jr ra + jr ra 4: - li t0, K0BASE - addu t1, t0, t3 - addiu t1, t1, -DCACHE_LINESIZE + li t0, K0BASE + addu t1, t0, t3 + addiu t1, t1, -DCACHE_LINESIZE 5: /* Index-Writeback-Invalidate */ - CACHE((C_IINV|CACH_PD), (t0)) + CACHE( (C_IINV | CACH_PD), (t0)) .set noreorder - bltu t0, t1, 5b - addiu t0, t0, DCACHE_LINESIZE + bltu t0, t1, 5b + addiu t0, t0, DCACHE_LINESIZE .set reorder - jr ra + jr ra END(osInvalDCache) diff --git a/src/os/invalicache.s b/src/os/invalicache.s index 29c49078..541b3872 100644 --- a/src/os/invalicache.s +++ b/src/os/invalicache.s @@ -5,47 +5,47 @@ .text LEAF(osInvalICache) /* If the amount to invalidate is less than or equal to 0, return immediately */ - blez a1, 2f + blez a1, 2f /* * If the amount to invalidate is as large as or larger than * the instruction cache size, invalidate all */ - li t3, ICACHE_SIZE - bgeu a1, t3, 3f + li t3, ICACHE_SIZE + bgeu a1, t3, 3f /* * ensure end address does not wrap around and end up smaller * than the start address */ - move t0, a0 - addu t1, a0, a1 - bgeu t0, t1, 2f + move t0, a0 + addu t1, a0, a1 + bgeu t0, t1, 2f /* Mask and subtract to align to cache line */ - addiu t1, t1, -ICACHE_LINESIZE - andi t2, t0, ICACHE_LINEMASK - subu t0, t0, t2 + addiu t1, t1, -ICACHE_LINESIZE + andi t2, t0, ICACHE_LINEMASK + subu t0, t0, t2 1: - CACHE((C_HINV|CACH_PI), (t0)) + CACHE( (C_HINV | CACH_PI), (t0)) .set noreorder - bltu t0, t1, 1b - addiu t0, t0, ICACHE_LINESIZE + bltu t0, t1, 1b + addiu t0, t0, ICACHE_LINESIZE .set reorder 2: - jr ra + jr ra 3: - li t0, K0BASE - addu t1, t0, t3 - addiu t1, t1, -ICACHE_LINESIZE + li t0, K0BASE + addu t1, t0, t3 + addiu t1, t1, -ICACHE_LINESIZE 4: - CACHE((C_IINV|CACH_PI), (t0)) + CACHE( (C_IINV | CACH_PI), (t0)) .set noreorder - bltu t0, t1, 4b - addiu t0, t0, ICACHE_LINESIZE + bltu t0, t1, 4b + addiu t0, t0, ICACHE_LINESIZE .set reorder - jr ra + jr ra END(osInvalICache) diff --git a/src/os/maptlb.s b/src/os/maptlb.s index 20125b66..be8b7841 100644 --- a/src/os/maptlb.s +++ b/src/os/maptlb.s @@ -13,90 +13,90 @@ .set noreorder LEAF(osMapTLB) #if defined (_DEBUG) && defined (__sgi) - bgez index, 1f - nop - b 2f - nop + bgez index, 1f + nop + b 2f + nop 1: - li t0, 0x1F + li t0, 0x1F .set noat - slt AT, index, t0 - bnez AT, 3f - nop + slt AT, index, t0 + bnez AT, 3f + nop .set at 2: - move a2, a0 - li a0, ERR_OSMAPTLB_INDEX - li a1, 1 - j __osError - nop + move a2, a0 + li a0, ERR_OSMAPTLB_INDEX + li a1, 1 + j __osError + nop 3: - lw t0, asid - li t1, -1 + lw t0, asid + li t1, -1 .set noat - slt AT, t0, t1 - beqz AT, 4f - nop + slt AT, t0, t1 + beqz AT, 4f + nop .set at - b 5f - nop + b 5f + nop 4: - li t1, 0xFF + li t1, 0xFF .set noat - slt AT, t1, t0 - beqz AT, 6f - nop + slt AT, t1, t0 + beqz AT, 6f + nop .set at 5: - move a2, t0 - li a0, ERR_OSMAPTLB_ASID - li a1, 1 - j 
__osError - nop + move a2, t0 + li a0, ERR_OSMAPTLB_ASID + li a1, 1 + j __osError + nop 6: .set reorder #endif - STAY2(mfc0 t0, C0_ENTRYHI) - STAY2(mtc0 index, C0_INX) - STAY2(mtc0 pm, C0_PAGEMASK) + MFC0( t0, C0_ENTRYHI) + MTC0( index, C0_INX) + MTC0( pm, C0_PAGEMASK) .set noreorder - lw t1, asid - beq t1, -1, 7f - li ta0, TLBLO_G - li t2, TLBLO_NONCOHRNT | TLBLO_D | TLBLO_V - b 8f - or vaddr, vaddr, t1 + lw t1, asid + beq t1, -1, 7f + li ta0, TLBLO_G + li t2, (TLBLO_NONCOHRNT | TLBLO_D | TLBLO_V) + b 8f + or vaddr, vaddr, t1 .set reorder 7: - li t2, TLBLO_NONCOHRNT | TLBLO_D | TLBLO_V | TLBLO_G + li t2, (TLBLO_NONCOHRNT | TLBLO_D | TLBLO_V | TLBLO_G) 8: - STAY2(mtc0 vaddr, C0_ENTRYHI) - beq evenpaddr, -1, 9f - #nop - srl t3, evenpaddr, TLBLO_PFNSHIFT - or t3, t3, t2 - STAY2(mtc0 t3, C0_ENTRYLO0) - b 10f - #nop + MTC0( vaddr, C0_ENTRYHI) + beq evenpaddr, -1, 9f + + srl t3, evenpaddr, TLBLO_PFNSHIFT + or t3, t3, t2 + MTC0( t3, C0_ENTRYLO0) + b 10f + 9: - STAY2(mtc0 ta0, C0_ENTRYLO0) + MTC0( ta0, C0_ENTRYLO0) 10: - lw t3, oddpaddr - beq t3, -1, 11f - #nop - srl t3, t3, TLBLO_PFNSHIFT - or t3, t3, t2 - STAY2(mtc0 t3, C0_ENTRYLO1) - b 12f - #nop + lw t3, oddpaddr + beq t3, -1, 11f + + srl t3, t3, TLBLO_PFNSHIFT + or t3, t3, t2 + MTC0( t3, C0_ENTRYLO1) + b 12f + 11: - STAY2(mtc0 ta0, C0_ENTRYLO1) - bne evenpaddr, -1, 12f - #nop - li t3, K0BASE - STAY2(mtc0 t3, C0_ENTRYHI) + MTC0( ta0, C0_ENTRYLO1) + bne evenpaddr, -1, 12f + + li t3, K0BASE + MTC0( t3, C0_ENTRYHI) 12: .set noreorder nop @@ -105,7 +105,6 @@ LEAF(osMapTLB) nop nop nop - STAY2(mtc0 t0, C0_ENTRYHI) - jr ra - #nop + MTC0( t0, C0_ENTRYHI) + jr ra END(osMapTLB) diff --git a/src/os/maptlbrdb.s b/src/os/maptlbrdb.s index b7db6c37..1714304d 100644 --- a/src/os/maptlbrdb.s +++ b/src/os/maptlbrdb.s @@ -6,26 +6,26 @@ .text .set noreorder LEAF(osMapTLBRdb) - mfc0 t0, C0_ENTRYHI - li t1, NTLBENTRIES - mtc0 t1, C0_INX - mtc0 zero, C0_PAGEMASK /*4k*/ - li t2, TLBLO_UNCACHED | TLBLO_D | TLBLO_V | TLBLO_G - li t1, K2BASE - mtc0 t1, C0_ENTRYHI - li t1, RDB_BASE_VIRTUAL_ADDR - srl t3, t1,TLBLO_PFNSHIFT - or t3, t3,t2 - mtc0 t3, C0_ENTRYLO0 - li t1, TLBLO_G - mtc0 t1, C0_ENTRYLO1 + mfc0 t0, C0_ENTRYHI + li t1, NTLBENTRIES + mtc0 t1, C0_INX + mtc0 zero, C0_PAGEMASK /* 4k */ + li t2, (TLBLO_UNCACHED | TLBLO_D | TLBLO_V | TLBLO_G) + li t1, K2BASE + mtc0 t1, C0_ENTRYHI + li t1, RDB_BASE_VIRTUAL_ADDR + srl t3, t1, TLBLO_PFNSHIFT + or t3, t3, t2 + mtc0 t3, C0_ENTRYLO0 + li t1, TLBLO_G + mtc0 t1, C0_ENTRYLO1 nop tlbwi nop nop nop nop - mtc0 t0, C0_ENTRYHI - jr ra - nop + mtc0 t0, C0_ENTRYHI + jr ra + nop END(osMapTLBRdb) diff --git a/src/os/probetlb.s b/src/os/probetlb.s index 1f59afbe..5bd4981d 100644 --- a/src/os/probetlb.s +++ b/src/os/probetlb.s @@ -14,11 +14,11 @@ */ LEAF(__osProbeTLB) /* Set C0_ENTRYHI based on supplied vaddr */ - mfc0 t0, C0_ENTRYHI - andi t1, t0, TLBHI_PIDMASK - and t2, a0, (TLBHI_VPN2MASK << 32) >> 32 - or t1, t1, t2 - mtc0 t1, C0_ENTRYHI + mfc0 t0, C0_ENTRYHI + andi t1, t0, TLBHI_PIDMASK + and t2, a0, (TLBHI_VPN2MASK << 32) >> 32 + or t1, t1, t2 + mtc0 t1, C0_ENTRYHI nop nop nop @@ -30,11 +30,11 @@ LEAF(__osProbeTLB) nop nop /* Read result */ - mfc0 t3, C0_INX - and t3, t3, TLBINX_PROBE + mfc0 t3, C0_INX + and t3, t3, TLBINX_PROBE /* Branch if no match was found */ - bnez t3, 3f - nop + bnez t3, 3f + nop /* * Read TLB, sets C0_ENTRYHI, C0_ENTRYLO0, C0_ENTRYLO1 and C0_PAGEMASK for the TLB * entry indicated by C0_INX @@ -44,42 +44,42 @@ LEAF(__osProbeTLB) nop nop /* Calculate page size = (page mask + 0x2000) >> 1 */ - mfc0 t3, 
C0_PAGEMASK - addi t3, t3, 0x2000 - srl t3, t3, 0x1 + mfc0 t3, C0_PAGEMASK + addi t3, t3, 0x2000 + srl t3, t3, 0x1 /* & with vaddr */ - and ta0, t3, a0 + and ta0, t3, a0 /* Select C0_ENTRYLO0 or C0_ENTRYLO1 */ - bnez ta0, 1f + bnez ta0, 1f /* make bitmask out of page size */ - addi t3, t3,-1 - mfc0 v0, C0_ENTRYLO0 - b 2f - nop + addi t3, t3, -1 + mfc0 v0, C0_ENTRYLO0 + b 2f + nop 1: - mfc0 v0, C0_ENTRYLO1 + mfc0 v0, C0_ENTRYLO1 2: /* Check valid bit and branch if not valid */ - andi ta1, v0,TLBLO_V - beqz ta1, 3f - nop + andi ta1, v0, TLBLO_V + beqz ta1, 3f + nop /* Extract the Page Frame Number from the entry */ - and v0, v0, TLBLO_PFNMASK - sll v0, v0, TLBLO_PFNSHIFT + and v0, v0, TLBLO_PFNMASK + sll v0, v0, TLBLO_PFNSHIFT /* Mask vaddr with page size mask */ - and ta1, a0, t3 + and ta1, a0, t3 /* Add masked vaddr to pfn to obtain the physical address */ - add v0, v0, ta1 - b 4f - nop + add v0, v0, ta1 + b 4f + nop 3: /* * No physical address for the supplied virtual address was found, * return -1 */ - li v0, -1 + li v0, -1 4: - mtc0 t0, C0_ENTRYHI - jr ra - nop + mtc0 t0, C0_ENTRYHI + jr ra + nop END(__osProbeTLB) diff --git a/src/os/setcause.s b/src/os/setcause.s index 52e49505..02419183 100644 --- a/src/os/setcause.s +++ b/src/os/setcause.s @@ -4,6 +4,6 @@ .text LEAF(__osSetCause) - STAY2(mtc0 a0, C0_CAUSE) - jr ra + MTC0( a0, C0_CAUSE) + jr ra END(__osSetCause) diff --git a/src/os/setcompare.s b/src/os/setcompare.s index dec29a23..7f8969c7 100644 --- a/src/os/setcompare.s +++ b/src/os/setcompare.s @@ -4,6 +4,6 @@ .text LEAF(__osSetCompare) - STAY2(mtc0 a0, C0_COMPARE) - jr ra + MTC0( a0, C0_COMPARE) + jr ra END(__osSetCompare) diff --git a/src/os/setconfig.s b/src/os/setconfig.s index 4f062bdd..8a291ee6 100644 --- a/src/os/setconfig.s +++ b/src/os/setconfig.s @@ -4,6 +4,6 @@ .text LEAF(__osSetConfig) - STAY2(mtc0 a0, C0_CONFIG) - jr ra + MTC0( a0, C0_CONFIG) + jr ra END(__osSetConfig) diff --git a/src/os/setcount.s b/src/os/setcount.s index f94f1d29..1e5bdb0c 100644 --- a/src/os/setcount.s +++ b/src/os/setcount.s @@ -4,6 +4,6 @@ .text LEAF(__osSetCount) - STAY2(mtc0 a0, C0_COUNT) - jr ra + MTC0( a0, C0_COUNT) + jr ra END(__osSetCount) diff --git a/src/os/setfpccsr.s b/src/os/setfpccsr.s index caeccafe..4b8f0e6e 100644 --- a/src/os/setfpccsr.s +++ b/src/os/setfpccsr.s @@ -4,7 +4,7 @@ .text LEAF(__osSetFpcCsr) - STAY2(cfc1 v0, fcr31) - STAY2(ctc1 a0, fcr31) - jr ra + CFC1( v0, fcr31) + CTC1( a0, fcr31) + jr ra END(__osSetSR) # @bug: Should be __osSetFpcCsr diff --git a/src/os/setintmask.s b/src/os/setintmask.s index 74e7bd2e..b9f93b0a 100644 --- a/src/os/setintmask.s +++ b/src/os/setintmask.s @@ -38,57 +38,57 @@ */ LEAF(osSetIntMask) /* Extract interrupt enable bits from current SR */ - mfc0 ta0, C0_SR + mfc0 ta0, C0_SR - andi v0, ta0, OS_IM_CPU + andi v0, ta0, OS_IM_CPU /* Get value of __OSGlobalIntMask */ - la t0, __OSGlobalIntMask - lw t3, 0(t0) + la t0, __OSGlobalIntMask + lw t3, 0(t0) /* Bitwise-OR in the disabled CPU bits of __OSGlobalIntMask */ - xor t0, t3, ~0 - andi t0, t0,(SR_IMASK) - or v0, v0,t0 + xor t0, t3, ~0 + andi t0, t0, SR_IMASK + or v0, v0, t0 /* Fetch MI_INTR_MASK_REG */ - lw t2, PHYS_TO_K1(MI_INTR_MASK_REG) + lw t2, PHYS_TO_K1(MI_INTR_MASK_REG) /* If there are RCP interrupts masked */ - beqz t2, 1f - srl t1, t3,0x10 + beqz t2, 1f + srl t1, t3, 0x10 /* Bitwise-OR in the disabled RCP bits of __OSGlobalIntMask */ - xor t1, t1, ~0 - andi t1, t1, (RCP_IMASK >> RCP_IMASKSHIFT) - or t2, t2,t1 + xor t1, t1, ~0 + andi t1, t1, (RCP_IMASK >> RCP_IMASKSHIFT) + or t2, t2, t1 
1: /* Shift the RCP bits to not conflict with the CPU bits */ - sll t2, t2, RCP_IMASKSHIFT + sll t2, t2, RCP_IMASKSHIFT /* OR the CPU and RCP bits together */ - or v0, v0,t2 + or v0, v0, t2 /* Extract RCP interrupt enable bits from requested mask and mask with __OSGlobalIntMask */ - and t0, a0, RCP_IMASK - and t0, t0,t3 + and t0, a0, RCP_IMASK + and t0, t0, t3 /* Convert to a value for MI_INTR_MASK_REG and set it */ - srl t0, t0,(RCP_IMASKSHIFT - 1) - lhu t2, __osRcpImTable(t0) - sw t2, PHYS_TO_K1(MI_INTR_MASK_REG) + srl t0, t0, (RCP_IMASKSHIFT - 1) + lhu t2, __osRcpImTable(t0) + sw t2, PHYS_TO_K1(MI_INTR_MASK_REG) /* Extract CPU interrupt enable bits from requested mask and mask with __OSGlobalIntMask */ - andi t0, a0, OS_IM_CPU - andi t1, t3, SR_IMASK - and t0, t0,t1 + andi t0, a0, OS_IM_CPU + andi t1, t3, SR_IMASK + and t0, t0, t1 - and ta0, ta0, ~SR_IMASK + and ta0, ta0, ~SR_IMASK /* Bitwise OR in the remaining bits of SR and set new SR */ - or ta0, ta0,t0 + or ta0, ta0, t0 - mtc0 ta0, C0_SR + mtc0 ta0, C0_SR nop nop - jr ra - nop + jr ra + nop END(osSetIntMask) .rdata diff --git a/src/os/setsr.s b/src/os/setsr.s index d7d98fac..37540fcb 100644 --- a/src/os/setsr.s +++ b/src/os/setsr.s @@ -4,7 +4,7 @@ .text LEAF(__osSetSR) - STAY2(mtc0 a0, C0_SR) + MTC0( a0, C0_SR) NOP - jr ra + jr ra END(__osSetSR) diff --git a/src/os/settlbasid.s b/src/os/settlbasid.s index 13a17e8e..e7f31fbe 100644 --- a/src/os/settlbasid.s +++ b/src/os/settlbasid.s @@ -7,26 +7,26 @@ LEAF(osSetTLBASID) #if defined (_DEBUG) && defined (__sgi) .set noreorder - bgez a0, 1f - nop - b 2f - nop + bgez a0, 1f + nop + b 2f + nop 1: - li t1, 0xFF + li t1, 0xFF .set noat - slt AT, t1, a0 - beqz AT, 3f - nop + slt AT, t1, a0 + beqz AT, 3f + nop .set at 2: - move a2, a0 - li a0, ERR_OSSETTLBASID - li a1, 1 - j __osError - nop + move a2, a0 + li a0, ERR_OSSETTLBASID + li a1, 1 + j __osError + nop 3: .set reorder #endif - STAY2(mtc0 a0, C0_ENTRYHI) - jr ra + MTC0( a0, C0_ENTRYHI) + jr ra END(osSetTLBASID) diff --git a/src/os/setwatchlo.s b/src/os/setwatchlo.s index 91cc58e8..4d54a51b 100644 --- a/src/os/setwatchlo.s +++ b/src/os/setwatchlo.s @@ -4,7 +4,7 @@ .text LEAF(__osSetWatchLo) - STAY2(mtc0 a0, C0_WATCHLO) + MTC0( a0, C0_WATCHLO) NOP - jr ra + jr ra END(__osSetWatchLo) diff --git a/src/os/unmaptlb.s b/src/os/unmaptlb.s index 6c9f631a..db7606af 100644 --- a/src/os/unmaptlb.s +++ b/src/os/unmaptlb.s @@ -7,39 +7,39 @@ .set noreorder LEAF(osUnmapTLB) #if defined (_DEBUG) && defined (__sgi) - bgez a0, 1f - nop - b 2f - nop + bgez a0, 1f + nop + b 2f + nop 1: - li t0, 0x1F + li t0, 0x1F .set noat - slt AT, a0, t0 - bnez AT, 3f - nop + slt AT, a0, t0 + bnez AT, 3f + nop .set at 2: - move a2, a0 - li a0, ERR_OSUNMAPTLB - li a1, 1 - j __osError - nop + move a2, a0 + li a0, ERR_OSUNMAPTLB + li a1, 1 + j __osError + nop 3: #endif - mfc0 t0, C0_ENTRYHI - mtc0 a0, C0_INX - li t1, K0BASE - mtc0 t1, C0_ENTRYHI - mtc0 zero, C0_ENTRYLO0 - mtc0 zero, C0_ENTRYLO1 + mfc0 t0, C0_ENTRYHI + mtc0 a0, C0_INX + li t1, K0BASE + mtc0 t1, C0_ENTRYHI + mtc0 zero, C0_ENTRYLO0 + mtc0 zero, C0_ENTRYLO1 nop tlbwi nop nop nop nop - mtc0 t0, C0_ENTRYHI - jr ra - nop + mtc0 t0, C0_ENTRYHI + jr ra + nop END(osUnmapTLB) diff --git a/src/os/unmaptlball.s b/src/os/unmaptlball.s index 2c4175af..0e09aa3e 100644 --- a/src/os/unmaptlball.s +++ b/src/os/unmaptlball.s @@ -5,22 +5,22 @@ .text .set noreorder LEAF(osUnmapTLBAll) - mfc0 t0, C0_ENTRYHI - li t1, NTLBENTRIES-1 /* last reserved for rdb */ - li t2, (K0BASE & TLBHI_VPN2MASK) - mtc0 t2, C0_ENTRYHI - mtc0 zero, 
C0_ENTRYLO0 - mtc0 zero, C0_ENTRYLO1 + mfc0 t0, C0_ENTRYHI + li t1, NTLBENTRIES-1 /* last reserved for rdb */ + li t2, (K0BASE & TLBHI_VPN2MASK) + mtc0 t2, C0_ENTRYHI + mtc0 zero, C0_ENTRYLO0 + mtc0 zero, C0_ENTRYLO1 1: - mtc0 t1, C0_INX + mtc0 t1, C0_INX nop tlbwi nop nop - addi t1, t1,-1 - bgez t1, 1b - nop - mtc0 t0, C0_ENTRYHI - jr ra - nop + addi t1, t1, -1 + bgez t1, 1b + nop + mtc0 t0, C0_ENTRYHI + jr ra + nop END(osUnmapTLBAll) diff --git a/src/os/writebackdcache.s b/src/os/writebackdcache.s index 0d9678a7..95d7c6fc 100644 --- a/src/os/writebackdcache.s +++ b/src/os/writebackdcache.s @@ -12,43 +12,43 @@ */ LEAF(osWritebackDCache) /* If the amount to write back is less than or equal to 0, return immediately */ - blez a1, 2f + blez a1, 2f /* * If the amount to write back is as large as or larger than * the data cache size, write back all */ - li t3, DCACHE_SIZE - bgeu a1,t3, 3f + li t3, DCACHE_SIZE + bgeu a1, t3, 3f /* * ensure end address does not wrap around and end up smaller * than the start address */ - move t0, a0 - addu t1, a0,a1 - bgeu t0, t1, 2f + move t0, a0 + addu t1, a0, a1 + bgeu t0, t1, 2f /* Mask and subtract to align to cache line */ - addiu t1, t1, -DCACHE_LINESIZE - andi t2, t0, DCACHE_LINEMASK - subu t0, t0,t2 + addiu t1, t1, -DCACHE_LINESIZE + andi t2, t0, DCACHE_LINEMASK + subu t0, t0, t2 1: .set noreorder - cache (C_HWB|CACH_PD), (t0) - bltu t0, t1, 1b - addiu t0, t0, DCACHE_LINESIZE + cache (C_HWB | CACH_PD), (t0) + bltu t0, t1, 1b + addiu t0, t0, DCACHE_LINESIZE .set reorder 2: - jr ra + jr ra /* same as osWritebackDCacheAll in operation */ 3: - li t0, K0BASE - addu t1, t0,t3 - addiu t1, t1, -DCACHE_LINESIZE + li t0, K0BASE + addu t1, t0, t3 + addiu t1, t1, -DCACHE_LINESIZE 4: .set noreorder - cache (C_IWBINV|CACH_PD), (t0) - bltu t0, t1, 4b - addiu t0, t0, DCACHE_LINESIZE + cache (C_IWBINV | CACH_PD), (t0) + bltu t0, t1, 4b + addiu t0, t0, DCACHE_LINESIZE .set reorder - jr ra + jr ra END(osWritebackDCache) diff --git a/src/os/writebackdcacheall.s b/src/os/writebackdcacheall.s index 044e1ca7..d792be87 100644 --- a/src/os/writebackdcacheall.s +++ b/src/os/writebackdcacheall.s @@ -4,15 +4,15 @@ .text LEAF(osWritebackDCacheAll) - li t0, K0BASE - li t2, DCACHE_SIZE - addu t1, t0,t2 - addiu t1, t1, -DCACHE_LINESIZE + li t0, K0BASE + li t2, DCACHE_SIZE + addu t1, t0, t2 + addiu t1, t1, -DCACHE_LINESIZE 1: .set noreorder - cache (C_IWBINV | CACH_PD), (t0) - bltu t0, t1, 1b - addiu t0, t0, DCACHE_LINESIZE + cache (C_IWBINV | CACH_PD), (t0) + bltu t0, t1, 1b + addiu t0, t0, DCACHE_LINESIZE .set reorder - jr ra + jr ra END(osWritebackDCacheAll) diff --git a/src/rmon/rmonrcp.s b/src/rmon/rmonrcp.s index 539f41d7..bddc1725 100644 --- a/src/rmon/rmonrcp.s +++ b/src/rmon/rmonrcp.s @@ -12,7 +12,7 @@ LEAF(__rmonRCPrunning) move v0, zero lw t0, PHYS_TO_K1(SP_STATUS_REG) - and t0, SP_STATUS_HALT | SP_STATUS_BROKE + and t0, (SP_STATUS_HALT | SP_STATUS_BROKE) bnez t0, isHalted ori v0, 1 isHalted: @@ -33,7 +33,7 @@ wait4dma: awaitIdle: li a0, PHYS_TO_K1(SP_STATUS_REG) lw v0, (a0) - and v0, SP_STATUS_HALT | SP_STATUS_BROKE + and v0, (SP_STATUS_HALT | SP_STATUS_BROKE) beqz v0, awaitIdle jr ra END(__rmonIdleRCP) @@ -41,7 +41,7 @@ END(__rmonIdleRCP) /* run the rsp in single-step mode to step one instruction */ LEAF(__rmonStepRCP) li a0, PHYS_TO_K1(SP_STATUS_REG) - li a1, SP_CLR_INTR_BREAK | SP_SET_SSTEP | SP_CLR_BROKE | SP_CLR_HALT + li a1, (SP_CLR_INTR_BREAK | SP_SET_SSTEP | SP_CLR_BROKE | SP_CLR_HALT) sw a1, (a0) b awaitIdle END(__rmonStepRCP) @@ -52,7 +52,7 @@ LEAF(__rmonRunRCP) li 
a1, MI_INTR_MASK_SET_SP sw a1, (a0) li a0, PHYS_TO_K1(SP_STATUS_REG) - li a1, SP_SET_INTR_BREAK | SP_CLR_SSTEP | SP_CLR_BROKE | SP_CLR_HALT + li a1, (SP_SET_INTR_BREAK | SP_CLR_SSTEP | SP_CLR_BROKE | SP_CLR_HALT) sw a1, (a0) jr ra END(__rmonRunRCP)
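
Note: for reference, a minimal before/after of the call-site change, taken
directly from the getcause.s hunk above. Per the macro definitions in
include/sys/asm.h, both forms expand to the same instruction wrapped in
.set noreorder / .set reorder; only the spelling at the call site changes:

    /* before: generic STAY2 wrapper */
    STAY2(mfc0 v0, C0_CAUSE)

    /* after: per-instruction macro */
    MFC0( v0, C0_CAUSE)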