mov dptr,%1
}
+// 202b: a push of acc immediately followed by a pop of acc is a no-op
+// pair; delete both instructions.
+replace {
+ push acc
+ pop acc
+} by {
+ ; Peephole 202b removed redundant push pop
+}
+
replace {
mov r%1,_spx
lcall %2
+// 213.a: DS390 only -- build the sign-toggled high byte with an explicit
+// shift followed by xrl instead of the composite ((%2 >> 8) ^ 0x80)
+// immediate expression, which the assembler mishandles.
replace {
mov %1,#(( %2 >> 8 ) ^ 0x80)
-} by {
+} by {
+ ; Peephole 213.a inserted fix
mov %1,#(%2 >> 8)
xrl %1,#0x80
} if portIsDS390
+// 213.b: same fix as 213.a for the third (bits 16..23) byte of a 24-bit
+// value: split the ((%2 >> 16) ^ 0x80) immediate into shift + xrl.
+replace {
+ mov %1,#(( %2 >> 16 ) ^ 0x80)
+} by {
+ ; Peephole 213.b inserted fix
+ mov %1,#(%2 >> 16)
+ xrl %1,#0x80
+} if portIsDS390
+
+// 213.c: as 213.a but for a sum (%2 + %3); the replacement also adds the
+// parentheses so the addition binds before the shift.
replace {
mov %1,#(( %2 + %3 >> 8 ) ^ 0x80)
} by {
+ ; Peephole 213.c inserted fix
mov %1,#((%2 + %3) >> 8)
xrl %1,#0x80
} if portIsDS390
mov r%4,a
movx @dptr,a
}
-
+
replace {
mov r%1,dpl
mov r%2,dph
mov r%3,dpx
}
+// 230.e: read-or-write of a far object through dptr.  movx leaves dptr
+// unchanged, so reloading dpl/dph/dpx from r%1..r%3 between the read and
+// the write is redundant, as is the detour through r%4 before the orl.
+// Fix: the final load was hard-coded as "mov a,r1"; it must use the
+// captured register r%4 -- otherwise the pattern could match a sequence
+// with %4 != 1 (which writes back r1, not the orl result) and the rewrite
+// would change its behavior.  Binding to r%4 also generalizes the rule.
+replace {
+ mov dpl,r%1
+ mov dph,r%2
+ mov dpx,r%3
+ movx a,@dptr
+ mov r%4,a
+ orl ar%4,#%5
+ mov dpl,r%1
+ mov dph,r%2
+ mov dpx,r%3
+ mov a,r%4
+ movx @dptr,a
+} by {
+ ; Peephole 230.e save reload dptr
+ mov dpl,r%1
+ mov dph,r%2
+ mov dpx,r%3
+ movx a,@dptr
+ orl a,#%5
+ mov r%4,a
+ movx @dptr,a
+}
+
+// 230.e (anl variant): identical to the orl form above but for a
+// read-and-write sequence; the redundant dptr reload and the r%4 detour
+// before the anl are dropped.
+// Fix: the final load was hard-coded as "mov a,r1"; it must use the
+// captured register r%4, both to keep the rewrite safe when %4 != 1 and
+// to let the rule fire for any register.
+replace {
+ mov dpl,r%1
+ mov dph,r%2
+ mov dpx,r%3
+ movx a,@dptr
+ mov r%4,a
+ anl ar%4,#%5
+ mov dpl,r%1
+ mov dph,r%2
+ mov dpx,r%3
+ mov a,r%4
+ movx @dptr,a
+} by {
+ ; Peephole 230.e save reload dptr
+ mov dpl,r%1
+ mov dph,r%2
+ mov dpx,r%3
+ movx a,@dptr
+ anl a,#%5
+ mov r%4,a
+ movx @dptr,a
+}
+
+// 230.f: the store goes through the secondary data pointer (dps is
+// toggled around the movx/inc), so the primary dptr is never modified;
+// restoring it from r%1..r%3 afterwards is redundant and is dropped.
+// The initial save into r%1..r%3 is kept for later uses of those regs.
+replace {
+ mov r%1,dpl
+ mov r%2,dph
+ mov r%3,dpx
+ mov a,r%4
+ inc dps
+ movx @dptr,a
+ inc dptr
+ mov dps,#0
+ mov dpl,r%1
+ mov dph,r%2
+ mov dpx,r%3
+} by {
+ ; Peephole 230.f save reload dptr
+ mov r%1,dpl
+ mov r%2,dph
+ mov r%3,dpx
+ mov a,r%4
+ inc dps
+ movx @dptr,a
+ inc dptr
+ mov dps,#0
+}
+
replace {
mov ar%1,r%2
mov ar%3,r%1
mov r%4,a
} by {
; Peehole 232.b simplified xch
- mov r%3,#%1
- mov r%4,#%2
-}
\ No newline at end of file
+ mov r%3,#%2
+ mov r%4,#%1
+}
+// NOTE(review): the replacement loads the register *indices* as
+// immediates (#%2/#%1) instead of copying register contents / acc as the
+// pattern does -- confirm this is the intended "simplified xch" rewrite.
+
+// 233: collapse a byte-wise constant load of the secondary data pointer
+// (dpl1/dph1/dpx1) into a single mov dptr,#imm bracketed by dps toggles.
+// NOTE(review): assumes DS390 flat24 mode where "mov dptr,#%1" assembles
+// as a 24-bit load (covering dpx1) -- confirm, otherwise dpx1 is lost.
+replace {
+ mov dpl1,#%1
+ mov dph1,#(%1 >> 8)
+ mov dpx1,#(%1 >> 16)
+} by {
+ ; Peephole 233 24 bit load of dptr1
+ inc dps
+ mov dptr,#%1
+ dec dps
+}
+
+// 14 rules by Fiorenzo D. Ramaglia <fd.ramaglia@tin.it>
+// 236a-n: "ar%n" is the direct-address alias of register r%n; where the
+// opcode also has a register-addressing form, rewrite to the plain r%n
+// operand (shorter encoding, same effect).
+
+replace {
+ add a,ar%1
+} by {
+ ; Peephole 236a
+ add a,r%1
+}
+
+replace {
+ addc a,ar%1
+} by {
+ ; Peephole 236b
+ addc a,r%1
+}
+
+replace {
+ anl a,ar%1
+} by {
+ ; Peephole 236c
+ anl a,r%1
+}
+
+replace {
+ dec ar%1
+} by {
+ ; Peephole 236d
+ dec r%1
+}
+
+replace {
+ djnz ar%1,%2
+} by {
+ ; Peephole 236e
+ djnz r%1,%2
+}
+
+replace {
+ inc ar%1
+} by {
+ ; Peephole 236f
+ inc r%1
+}
+
+replace {
+ mov a,ar%1
+} by {
+ ; Peephole 236g
+ mov a,r%1
+}
+
+replace {
+ mov ar%1,#%2
+} by {
+ ; Peephole 236h
+ mov r%1,#%2
+}
+
+replace {
+ mov ar%1,a
+} by {
+ ; Peephole 236i
+ mov r%1,a
+}
+
+replace {
+ mov ar%1,ar%2
+} by {
+ ; Peephole 236j
+ mov r%1,ar%2
+}
+
+replace {
+ orl a,ar%1
+} by {
+ ; Peephole 236k
+ orl a,r%1
+}
+
+replace {
+ subb a,ar%1
+} by {
+ ; Peephole 236l
+ subb a,r%1
+}
+
+replace {
+ xch a,ar%1
+} by {
+ ; Peephole 236m
+ xch a,r%1
+}
+
+replace {
+ xrl a,ar%1
+} by {
+ ; Peephole 236n
+ xrl a,r%1
+}
+
+// 237a/b: an sjmp whose target label is immediately followed by ret is
+// replaced by a ret in place; the skipped code and both labels are kept
+// unchanged for any other entry paths.
+replace {
+ sjmp %1
+%2:
+ mov %3,%4
+%1:
+ ret
+} by {
+ ; Peephole 237a removed sjmp to ret
+ ret
+%2:
+ mov %3,%4
+%1:
+ ret
+}
+
+replace {
+ sjmp %1
+%2:
+ mov %3,%4
+ mov dpl,%5
+ mov dph,%6
+%1:
+ ret
+} by {
+ ; Peephole 237b removed sjmp to ret
+ ret
+%2:
+ mov %3,%4
+ mov dpl,%5
+ mov dph,%6
+%1:
+ ret
+}
+
+// 238.a-d: after copying a group of operands %1.. from %5.. (or %9..),
+// copying the sources back from the just-written destinations is
+// redundant; the operandsNotSameN guard ensures no operand aliases
+// another, so dropping the copy-back moves is safe.
+// applies to f.e. device/lib/log10f.c
+replace {
+ mov %1,%9
+ mov %2,%10
+ mov %3,%11
+ mov %4,%12
+
+ mov %5,%13
+ mov %6,%14
+ mov %7,%15
+ mov %8,%16
+
+ mov %9,%1
+ mov %10,%2
+ mov %11,%3
+ mov %12,%4
+} by {
+ mov %1,%9
+ mov %2,%10
+ mov %3,%11
+ mov %4,%12
+
+ mov %5,%13
+ mov %6,%14
+ mov %7,%15
+ mov %8,%16
+ ; Peephole 238.a removed 4 redundant moves
+} if operandsNotSame8 %1 %2 %3 %4 %5 %6 %7 %8
+
+// applies to device/lib/log10f.c
+replace {
+ mov %1,%5
+ mov %2,%6
+ mov %3,%7
+ mov %4,%8
+
+ mov %5,%1
+ mov %6,%2
+ mov %7,%3
+} by {
+ mov %1,%5
+ mov %2,%6
+ mov %3,%7
+ mov %4,%8
+ ; Peephole 238.b removed 3 redundant moves
+} if operandsNotSame7 %1 %2 %3 %4 %5 %6 %7
+
+// applies to f.e. device/lib/time.c
+replace {
+ mov %1,%5
+ mov %2,%6
+
+ mov %3,%7
+ mov %4,%8
+
+ mov %5,%1
+ mov %6,%2
+} by {
+ mov %1,%5
+ mov %2,%6
+
+ mov %3,%7
+ mov %4,%8
+ ; Peephole 238.c removed 2 redundant moves
+} if operandsNotSame4 %1 %2 %3 %4
+
+// applies to f.e. support/regression/tests/bug-524209.c
+replace {
+ mov %1,%4
+ mov %2,%5
+ mov %3,%6
+
+ mov %4,%1
+ mov %5,%2
+ mov %6,%3
+} by {
+ mov %1,%4
+ mov %2,%5
+ mov %3,%6
+ ; Peephole 238.d removed 3 redundant moves
+} if operandsNotSame6 %1 %2 %3 %4 %5 %6
+
+// 239: "mov r%1,acc" assembles as the two-byte mov Rn,direct form; the
+// register form "mov r%1,a" is one byte with the same effect.
+// applies to f.e. ser_ir.asm
+replace {
+ mov r%1,acc
+} by {
+ ; Peephole 239 used a instead of acc
+ mov r%1,a
+}
+
+// 240: both forms compute a = %1 + carry (addition commutes and addc
+// derives flags from the same operand values); restart lets later rules
+// match the rewritten sequence again.
+replace restart {
+ mov a,%1
+ addc a,#0x00
+} by {
+ ; Peephole 240 use clr instead of addc a,#0
+ clr a
+ addc a,%1
+}
+
+// peepholes 241.a to 241.c and 241.d to 241.f need to be in order
+// 241.a-c: boolean result of a multi-byte equality compare.  Hoisting
+// "clr a" above the cjne chain (cjne sets flags but never touches a)
+// lets the 0/1 result be formed with a single "inc a" on the equal path,
+// removing the sjmp and the separate clr branch.
+// NOTE(review): both labels are kept but %3 no longer clears a on entry;
+// assumes %3/%10 are referenced only by this sequence (there is no
+// labelRefCount guard) -- confirm.
+replace {
+ cjne r%1,#%2,%3
+ cjne r%4,#%5,%3
+ cjne r%6,#%7,%3
+ cjne r%8,#%9,%3
+ mov a,#0x01
+ sjmp %10
+%3:
+ clr a
+%10:
+} by {
+ ; Peephole 241.a optimized compare
+ clr a
+ cjne r%1,#%2,%3
+ cjne r%4,#%5,%3
+ cjne r%6,#%7,%3
+ cjne r%8,#%9,%3
+ inc a
+%3:
+%10:
+}
+
+// applies to f.e. time.c
+replace {
+ cjne r%1,#%2,%3
+ cjne r%4,#%5,%3
+ mov a,#0x01
+ sjmp %6
+%3:
+ clr a
+%6:
+} by {
+ ; Peephole 241.b optimized compare
+ clr a
+ cjne r%1,#%2,%3
+ cjne r%4,#%5,%3
+ inc a
+%3:
+%6:
+}
+
+// applies to f.e. malloc.c
+replace {
+ cjne r%1,#%2,%3
+ mov a,#0x01
+ sjmp %4
+%3:
+ clr a
+%4:
+} by {
+ ; Peephole 241.c optimized compare
+ clr a
+ cjne r%1,#%2,%3
+ inc a
+%3:
+%4:
+}
+
+// 241.d-f: same 0/1-result compare optimization as 241.a-c, but for
+// indirect (@r%1) multi-byte compares that step the pointer with inc r%1
+// between bytes; the instruction sequence itself is unchanged, only the
+// result materialization (clr a ... inc a) is restructured.
+// applies to f.e. j = (k!=0x1000);
+// with volatile idata long k;
+replace {
+ cjne @r%1,#%2,%3
+ inc r%1
+ cjne @r%1,#%4,%3
+ inc r%1
+ cjne @r%1,#%5,%3
+ inc r%1
+ cjne @r%1,#%6,%3
+ mov a,#0x01
+ sjmp %7
+%3:
+ clr a
+%7:
+} by {
+ ; Peephole 241.d optimized compare
+ clr a
+ cjne @r%1,#%2,%3
+ inc r%1
+ cjne @r%1,#%4,%3
+ inc r%1
+ cjne @r%1,#%5,%3
+ inc r%1
+ cjne @r%1,#%6,%3
+ inc a
+%3:
+%7:
+}
+
+// applies to f.e. j = (k!=0x1000);
+// with volatile idata int k;
+replace {
+ cjne @r%1,#%2,%3
+ inc r%1
+ cjne @r%1,#%4,%3
+ mov a,#0x01
+ sjmp %7
+%3:
+ clr a
+%7:
+} by {
+ ; Peephole 241.e optimized compare
+ clr a
+ cjne @r%1,#%2,%3
+ inc r%1
+ cjne @r%1,#%4,%3
+ inc a
+%3:
+%7:
+}
+
+// applies to f.e. vprintf.asm (--stack-auto)
+replace {
+ cjne @r%1,#%2,%3
+ mov a,#0x01
+ sjmp %7
+%3:
+ clr a
+%7:
+} by {
+ ; Peephole 241.f optimized compare
+ clr a
+ cjne @r%1,#%2,%3
+ inc a
+%3:
+%7:
+}
+
+// 242.a-c: when the code after a jnz falls through a label straight into
+// a jz, move the label below the jz: the jnz path (a != 0) then skips a
+// jz it could never take, avoiding a branch-to-branch.  Safe only while
+// this jnz is the label's sole reference (labelRefCount %1 1).
+// applies to f.e. scott-bool1.c
+replace {
+ jnz %1
+ mov %2,%3
+%1:
+ jz %4
+} by {
+ ; Peephole 242.a avoided branch jnz to jz
+ jnz %1
+ mov %2,%3
+ jz %4
+%1:
+} if labelRefCount %1 1
+
+// applies to f.e. scott-bool1.c
+replace {
+ jnz %1
+ mov %2,%3
+ orl a,%5
+%1:
+ jz %4
+} by {
+ ; Peephole 242.b avoided branch jnz to jz
+ jnz %1
+ mov %2,%3
+ orl a,%5
+ jz %4
+%1:
+} if labelRefCount %1 1
+
+// applies to f.e. logic.c
+replace {
+ jnz %1
+ mov %2,%3
+ orl a,%5
+ orl a,%6
+ orl a,%7
+%1:
+ jz %4
+} by {
+ ; Peephole 242.c avoided branch jnz to jz
+ jnz %1
+ mov %2,%3
+ orl a,%5
+ orl a,%6
+ orl a,%7
+ jz %4
+%1:
+} if labelRefCount %1 1
+
+// 243a/b: a conditional jump to the immediately following label is a
+// no-op; delete jump and label when this jump is the label's only
+// reference (labelRefCount %1 1).
+replace {
+ jnz %1
+%1:
+} by {
+ ; Peephole 243a jump optimization
+} if labelRefCount %1 1
+
+replace {
+ jz %1
+%1:
+} by {
+ ; Peephole 243b jump optimization
+} if labelRefCount %1 1
+
+
+// This allows non-interrupt and interrupt code to safely compete
+// for a resource without the non-interrupt code having to disable
+// interrupts:
+// volatile bit resource_is_free;
+// if( resource_is_free ) {
+// resource_is_free=0; do_something; resource_is_free=1;
+// }
+// 244.a: jbc tests the bit and clears it in one uninterruptible
+// instruction, closing the window between the jnb test and the clr.
+// Requires %3 to be otherwise unreferenced (labelRefCount %3 0).
+replace {
+ jnb %1,%2
+%3:
+ clr %1
+} by {
+ ; Peephole 244.a using atomic test and clear
+ jbc %1,%3
+ sjmp %2
+%3:
+} if labelRefCount %3 0
+// 244.b: as 244.a but for the jb/ljmp shape -- jbc performs the taken
+// branch's test-and-clear atomically; %2 must have exactly one other
+// reference (this jb) for the rewrite to be safe.
+replace {
+ jb %1,%2
+ ljmp %3
+%2:
+ clr %1
+} by {
+ ; Peephole 244.b using atomic test and clear
+ jbc %1,%2
+ ljmp %3
+%2:
+} if labelRefCount %2 1
+