+ ; Peephole 236.g used r%1 instead of ar%1
+ mov a,r%1
+}
+
+replace {
+ mov ar%1,#%2
+} by {
+ ; Peephole 236.h used r%1 instead of ar%1
+ mov r%1,#%2
+}
+
+replace {
+ mov ar%1,a
+} by {
+ ; Peephole 236.i used r%1 instead of ar%1
+ mov r%1,a
+}
+
+replace {
+ mov ar%1,ar%2
+} by {
+ ; Peephole 236.j used r%1 instead of ar%1
+ mov r%1,ar%2
+}
+
+replace {
+ orl a,ar%1
+} by {
+ ; Peephole 236.k used r%1 instead of ar%1
+ orl a,r%1
+}
+
+replace {
+ subb a,ar%1
+} by {
+ ; Peephole 236.l used r%1 instead of ar%1
+ subb a,r%1
+}
+
+replace {
+ xch a,ar%1
+} by {
+ ; Peephole 236.m used r%1 instead of ar%1
+ xch a,r%1
+}
+
+replace {
+ xrl a,ar%1
+} by {
+ ; Peephole 236.n used r%1 instead of ar%1
+ xrl a,r%1
+}
+
+replace {
+ sjmp %1
+%2:
+ mov %3,%4
+%1:
+ ret
+} by {
+ ; Peephole 237.a removed sjmp to ret
+ ret
+%2:
+ mov %3,%4
+%1:
+ ret
+}
+
+replace {
+ sjmp %1
+%2:
+ mov %3,%4
+ mov dpl,%5
+ mov dph,%6
+%1:
+ ret
+} by {
+ ; Peephole 237.b removed sjmp to ret
+ ret
+%2:
+ mov %3,%4
+ mov dpl,%5
+ mov dph,%6
+%1:
+ ret
+}
+
+// applies to f.e. device/lib/log10f.c
+replace {
+ mov %1,%9
+ mov %2,%10
+ mov %3,%11
+ mov %4,%12
+
+ mov %5,%13
+ mov %6,%14
+ mov %7,%15
+ mov %8,%16
+
+ mov %9,%1
+ mov %10,%2
+ mov %11,%3
+ mov %12,%4
+} by {
+ mov %1,%9
+ mov %2,%10
+ mov %3,%11
+ mov %4,%12
+
+ mov %5,%13
+ mov %6,%14
+ mov %7,%15
+ mov %8,%16
+ ; Peephole 238.a removed 4 redundant moves
+} if operandsNotSame8 %1 %2 %3 %4 %5 %6 %7 %8
+
+// applies to device/lib/log10f.c
+replace {
+ mov %1,%5
+ mov %2,%6
+ mov %3,%7
+ mov %4,%8
+
+ mov %5,%1
+ mov %6,%2
+ mov %7,%3
+} by {
+ mov %1,%5
+ mov %2,%6
+ mov %3,%7
+ mov %4,%8
+ ; Peephole 238.b removed 3 redundant moves
+} if operandsNotSame7 %1 %2 %3 %4 %5 %6 %7
+
+// applies to f.e. device/lib/time.c
+replace {
+ mov %1,%5
+ mov %2,%6
+
+ mov %3,%7
+ mov %4,%8
+
+ mov %5,%1
+ mov %6,%2
+} by {
+ mov %1,%5
+ mov %2,%6
+
+ mov %3,%7
+ mov %4,%8
+ ; Peephole 238.c removed 2 redundant moves
+} if operandsNotSame4 %1 %2 %3 %4
+
+// applies to f.e. support/regression/tests/bug-524209.c
+replace {
+ mov %1,%4
+ mov %2,%5
+ mov %3,%6
+
+ mov %4,%1
+ mov %5,%2
+ mov %6,%3
+} by {
+ mov %1,%4
+ mov %2,%5
+ mov %3,%6
+ ; Peephole 238.d removed 3 redundant moves
+} if operandsNotSame6 %1 %2 %3 %4 %5 %6
+
+// applies to f.e. ser_ir.asm
+replace {
+ mov r%1,acc
+} by {
+ ; Peephole 239 used a instead of acc
+ mov r%1,a
+}
+
+// The add of #0x00 exists only to fold the carry flag into %1's copy in a.
+// 'clr a' does not modify the carry flag on the 8051, so 0 + %1 + C yields
+// the same value while dropping the immediate byte.  'restart' re-scans the
+// output so this result can feed other peepholes.
+replace restart {
+ mov a,%1
+ addc a,#0x00
+} by {
+ ; Peephole 240 use clr instead of addc a,#0
+ clr a
+ addc a,%1
+}
+
+// peepholes 241.a to 241.c and 241.d to 241.f need to be in order
+replace {
+ cjne r%1,#%2,%3
+ cjne r%4,#%5,%3
+ cjne r%6,#%7,%3
+ cjne r%8,#%9,%3
+ mov a,#0x01
+ sjmp %10
+%3:
+ clr a
+%10:
+} by {
+ ; Peephole 241.a optimized compare
+ clr a
+ cjne r%1,#%2,%3
+ cjne r%4,#%5,%3
+ cjne r%6,#%7,%3
+ cjne r%8,#%9,%3
+ inc a
+%3:
+%10:
+}
+
+// applies to f.e. time.c
+replace {
+ cjne r%1,#%2,%3
+ cjne r%4,#%5,%3
+ mov a,#0x01
+ sjmp %6
+%3:
+ clr a
+%6:
+} by {
+ ; Peephole 241.b optimized compare
+ clr a
+ cjne r%1,#%2,%3
+ cjne r%4,#%5,%3
+ inc a
+%3:
+%6:
+}
+
+// applies to f.e. malloc.c
+replace {
+ cjne r%1,#%2,%3
+ mov a,#0x01
+ sjmp %4
+%3:
+ clr a
+%4:
+} by {
+ ; Peephole 241.c optimized compare
+ clr a
+ cjne r%1,#%2,%3
+ inc a
+%3:
+%4:
+}
+
+// applies to f.e. j = (k!=0x1000);
+// with volatile idata long k;
+replace {
+ cjne @r%1,#%2,%3
+ inc r%1
+ cjne @r%1,#%4,%3
+ inc r%1
+ cjne @r%1,#%5,%3
+ inc r%1
+ cjne @r%1,#%6,%3
+ mov a,#0x01
+ sjmp %7
+%3:
+ clr a
+%7:
+} by {
+ ; Peephole 241.d optimized compare
+ clr a
+ cjne @r%1,#%2,%3
+ inc r%1
+ cjne @r%1,#%4,%3
+ inc r%1
+ cjne @r%1,#%5,%3
+ inc r%1
+ cjne @r%1,#%6,%3
+ inc a
+%3:
+%7:
+}
+
+// applies to f.e. j = (k!=0x1000);
+// with volatile idata int k;
+replace {
+ cjne @r%1,#%2,%3
+ inc r%1
+ cjne @r%1,#%4,%3
+ mov a,#0x01
+ sjmp %7
+%3:
+ clr a
+%7:
+} by {
+ ; Peephole 241.e optimized compare
+ clr a
+ cjne @r%1,#%2,%3
+ inc r%1
+ cjne @r%1,#%4,%3
+ inc a
+%3:
+%7:
+}
+
+// applies to f.e. vprintf.asm (--stack-auto)
+replace {
+ cjne @r%1,#%2,%3
+ mov a,#0x01
+ sjmp %7
+%3:
+ clr a
+%7:
+} by {
+ ; Peephole 241.f optimized compare
+ clr a
+ cjne @r%1,#%2,%3
+ inc a
+%3:
+%7:
+}
+
+// applies to f.e. scott-bool1.c
+replace {
+ jnz %1
+ mov %2,%3
+%1:
+ jz %4
+} by {
+ ; Peephole 242.a avoided branch jnz to jz
+ jnz %1
+ mov %2,%3
+ jz %4
+%1:
+} if labelRefCount %1 1
+
+// applies to f.e. scott-bool1.c
+replace {
+ jnz %1
+ mov %2,%3
+ orl a,%5
+%1:
+ jz %4
+} by {
+ ; Peephole 242.b avoided branch jnz to jz
+ jnz %1
+ mov %2,%3
+ orl a,%5
+ jz %4
+%1:
+} if labelRefCount %1 1
+
+// applies to f.e. logic.c
+replace {
+ jnz %1
+ mov %2,%3
+ orl a,%5
+ orl a,%6
+ orl a,%7
+%1:
+ jz %4
+} by {
+ ; Peephole 242.c avoided branch jnz to jz
+ jnz %1
+ mov %2,%3
+ orl a,%5
+ orl a,%6
+ orl a,%7
+ jz %4
+%1:
+} if labelRefCount %1 1
+
+// applies to f.e. vprintf.c
+// this is a rare case, usually the "tail increment" is noticed earlier
+replace {
+ cjne %1,%2,%3
+ inc %4
+%3:
+ sjmp %5
+} by {
+ ; Peephole 243 avoided branch to sjmp
+ cjne %1,%2,%5
+ inc %4
+%3:
+ sjmp %5
+} if labelInRange
+
+// applies to f.e. simplefloat.c (saving 1 cycle)
+replace {
+ mov r%1,dpl
+ mov a,r%1
+} by {
+ ; Peephole 244.a moving first to a instead of r%1
+ mov a,dpl
+ mov r%1,a
+}
+
+// applies to f.e. _itoa.c (saving 1 cycle)
+replace {
+ mov r%1,dph
+ mov a,r%1
+} by {
+ ; Peephole 244.b moving first to a instead of r%1
+ mov a,dph
+ mov r%1,a
+}
+
+
+// applies to f.e. bug-460010.c (saving 1 cycle)
+replace {
+ mov r%1,a
+ mov dpl,r%1
+} by {
+ ; Peephole 244.c loading dpl from a instead of r%1
+ mov r%1,a
+ mov dpl,a
+}
+
+replace {
+ mov r%1,a
+ mov dph,r%1
+} by {
+ ; Peephole 244.d loading dph from a instead of r%1
+ mov r%1,a
+ mov dph,a
+}
+
+// this one is safe but disables 245.a 245.b
+// please remove 245 if 245.a 245.b are found to be safe
+// applies to f.e. scott-compare.c
+replace {
+ clr a
+ rlc a
+ mov r%1,a
+ cjne a,#0x01,%2
+%2:
+ clr a
+ rlc a
+ mov r%1,a
+} by {
+ ; Peephole 245 optimized complement (r%1 and acc set needed?)
+ cpl c
+ clr a
+ rlc a
+ mov r%1,a
+} if labelRefCount %2 1
+
+// this one will not be triggered if 245 is present
+// please remove 245 if 245.a 245.b are found to be safe
+// applies to f.e. vprintf.c
+replace {
+ clr a
+ rlc a
+ mov r%1,a
+ cjne a,#0x01,%2
+%2:
+ clr a
+ rlc a
+ mov r%1,a
+ jz %3
+} by {
+ ; Peephole 245.a optimized conditional jump (r%1 and acc not set!)
+ jc %3
+} if labelRefCount %2 1
+
+// this one will not be triggered if 245 is present
+// please remove 245 if 245.a 245.b are found to be safe
+// applies to f.e. scott-compare.c
+replace {
+ clr a
+ rlc a
+ mov r%1,a
+ cjne a,#0x01,%2
+%2:
+ clr a
+ rlc a
+ mov r%1,a
+ jnz %3
+} by {
+ ; Peephole 245.b optimized conditional jump (r%1 and acc not set!)
+ jnc %3
+} if labelRefCount %2 1
+
+
+// rules 246.x apply to f.e. bitfields.c
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%3
+ movx @dptr,a
+} by {
+ ; Peephole 246.a combined clr/clr
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2&%3
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%3
+ movx @dptr,a
+} by {
+ ; Peephole 246.b combined set/set
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2|%3
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%3
+ movx @dptr,a
+} by {
+ ; Peephole 246.c combined set/clr
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%3
+ movx @dptr,a
+} by {
+ ; Peephole 246.d combined clr/set
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%4
+ movx @dptr,a
+} by {
+ ; Peephole 246.e combined set/clr/clr
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3&%4
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%4
+ movx @dptr,a
+} by {
+ ; Peephole 246.f combined set/clr/set
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3
+ orl a,#%4
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%4
+ movx @dptr,a
+} by {
+ ; Peephole 246.g combined clr/set/clr
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3
+ anl a,#%4
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%4
+ movx @dptr,a
+} by {
+ ; Peephole 246.h combined clr/set/set
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3|%4
+ movx @dptr,a
+} if notVolatile %1
+
+
+
+
+// rules 247.x apply to f.e. bitfields.c
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%3
+ mov @r%5,a
+} by {
+ ; Peephole 247.a combined clr/clr
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2&%3
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%3
+ mov @r%5,a
+} by {
+ ; Peephole 247.b combined set/set
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2|%3
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%3
+ mov @r%5,a
+} by {
+ ; Peephole 247.c combined set/clr
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%3
+ mov @r%5,a
+} by {
+ ; Peephole 247.d combined clr/set
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%4
+ mov @r%5,a
+} by {
+ ; Peephole 247.e combined set/clr/clr
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3&%4
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%4
+ mov @r%5,a
+} by {
+ ; Peephole 247.f combined set/clr/set
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3
+ orl a,#%4
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%4
+ mov @r%5,a
+} by {
+ ; Peephole 247.g combined clr/set/clr
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3
+ anl a,#%4
+ mov @r%5,a
+} if notVolatile %1
+
+// 247.h: the second read-back must go through @r%5, the pointer register
+// that was just reloaded with #%1 — matching rules 247.a to 247.g.  The
+// previous @r%4 collided with the #%4 immediate two lines below, so the
+// pattern could not match the clr/set/set sequence it was written for.
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%4
+ mov @r%5,a
+} by {
+ ; Peephole 247.h combined clr/set/set
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3|%4
+ mov @r%5,a
+} if notVolatile %1
+
+
+// Peepholes 248.x have to be compatible with the keyword volatile.
+// They optimize typical accesses to memory mapped I/O devices:
+// volatile xdata char t; t|=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ mov dptr,%1
+ mov a,%3
+ orl a,r%2
+ movx @dptr,a
+} by {
+ ; Peephole 248.a optimized or to xdata
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%3
+ movx @dptr,a
+}
+
+// volatile xdata char t; t&=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ mov dptr,%1
+ mov a,%3
+ anl a,r%2
+ movx @dptr,a
+} by {
+ ; Peephole 248.b optimized and to xdata
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%3
+ movx @dptr,a
+}
+
+// volatile xdata char t; t^=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ mov dptr,%1
+ mov a,%3
+ xrl a,r%2
+ movx @dptr,a
+} by {
+ ; Peephole 248.c optimized xor to xdata
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ xrl a,%3
+ movx @dptr,a
+}
+
+// volatile xdata char t; t|=0x01; t&=~0x01; t|=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%4
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%5
+ movx @dptr,a
+} by {
+ ; Peephole 248.d optimized or/and/or to volatile xdata
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ orl a,%5
+ movx @dptr,a
+}
+
+// volatile xdata char t; t&=~0x01; t|=0x01; t&=~0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%4
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%5
+ movx @dptr,a
+} by {
+ ; Peephole 248.e optimized and/or/and to volatile xdata
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ anl a,%5
+ movx @dptr,a
+}
+
+// volatile xdata char t; t|=0x01; t&=~0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%4
+ movx @dptr,a
+} by {
+ ; Peephole 248.f optimized or/and to volatile xdata
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ anl a,%4
+ movx @dptr,a
+}
+
+// volatile xdata char t; t&=~0x01; t|=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%4
+ movx @dptr,a
+} by {
+ ; Peephole 248.g optimized and/or to volatile xdata
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ orl a,%4
+ movx @dptr,a
+}
+
+// volatile xdata char t; t^=0x01; t^=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ xrl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ xrl a,%4
+ movx @dptr,a
+} by {
+ ; Peephole 248.h optimized xor/xor to volatile xdata
+ mov dptr,%1
+ movx a,@dptr
+ xrl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ xrl a,%4
+ movx @dptr,a
+}
+
+// Peepholes 248.i to 248.m are like 248.d to 248.h except they apply to bitfields:
+// xdata struct { unsigned b0:1; unsigned b1:1; unsigned b2:1; } xport;
+// xport.b0=1; xport.b0=0; xport.b0=1;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%5
+ movx @dptr,a
+} by {
+ ; Peephole 248.i optimized or/and/or to xdata bitfield
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+ movx a,@dptr
+ orl a,%5
+ movx @dptr,a
+}
+
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%5
+ movx @dptr,a
+} by {
+ ; Peephole 248.j optimized and/or/and to xdata bitfield
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+ movx a,@dptr
+ anl a,%5
+ movx @dptr,a
+}
+
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+} by {
+ ; Peephole 248.k optimized or/and to xdata bitfield
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+}
+
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+} by {
+ ; Peephole 248.l optimized and/or to xdata bitfield
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+}
+
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ xrl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ xrl a,%4
+ movx @dptr,a
+} by {
+ ; Peephole 248.m optimized xor/xor to xdata bitfield
+ mov dptr,%1
+ movx a,@dptr
+ xrl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ xrl a,%4
+ movx @dptr,a
+}
+
+
+// A conditional jump whose target is the label on the very next line is a
+// no-op.  When labelRefCount shows this jump is the label's only reference,
+// both the jump and the label can be deleted outright.
+replace {
+ jnz %1
+%1:
+} by {
+ ; Peephole 249.a jump optimization
+} if labelRefCount %1 1
+
+replace {
+ jz %1
+%1:
+} by {
+ ; Peephole 249.b jump optimization
+} if labelRefCount %1 1
+
+
+// This allows non-interrupt and interrupt code to safely compete
+// for a resource without the non-interrupt code having to disable
+// interrupts:
+// volatile bit resource_is_free;
+// if( resource_is_free ) {
+// resource_is_free=0; do_something; resource_is_free=1;
+// }
+replace {
+ jnb %1,%2
+%3:
+ clr %1
+} by {
+ ; Peephole 250.a using atomic test and clear
+ jbc %1,%3
+ sjmp %2
+%3:
+} if labelRefCount %3 0
+
+replace {
+ jb %1,%2
+ ljmp %3
+%2:
+ clr %1
+} by {
+ ; Peephole 250.b using atomic test and clear
+ jbc %1,%2
+ ljmp %3
+%2:
+} if labelRefCount %2 1
+
+
+// not before peephole 250.b
+replace {
+ ljmp %5
+} by {
+ ; Peephole 251.a replaced ljmp to ret with ret
+ ret
+} if labelIsReturnOnly
+
+// not before peephole 250.b
+replace {
+ sjmp %5
+} by {
+ ; Peephole 251.b replaced sjmp to ret with ret
+ ret
+} if labelIsReturnOnly
+
+// applies to shifts.c and when accessing arrays with an unsigned integer index
+// saving 1 byte, 2 cycles
+replace {
+ mov r%1,%2
+ mov a,(%2 + 1)
+ xch a,r%1
+ add a,acc
+ xch a,r%1
+ rlc a
+ mov r%3,a
+} by {
+ ; Peephole 252 optimized left shift
+ mov a,%2
+ add a,acc
+ mov r%1,a
+ mov a,(%2 + 1)
+ rlc a
+ mov r%3,a
+}
+
+// applies to: void test( char c ) { if( c ) func1(); else func2(); }
+replace {
+ lcall %1
+ ret
+} by {
+ ; Peephole 253.a replaced lcall/ret with ljmp
+ ljmp %1
+}
+
+// applies to: void test( char c ) { if( c ) func1(); else func2(); }
+replace {
+ lcall %1
+%2:
+ ret
+} by {
+ ; Peephole 253.b replaced lcall/ret with ljmp
+ ljmp %1
+} if labelRefCount %2 0
+
+// applies to f.e. scott-bool1.c
+replace {
+ lcall %1
+%2:
+ ret
+} by {
+ ; Peephole 253.c replaced lcall with ljmp
+ ljmp %1
+%2:
+ ret
+}
+
+
+// applies to f.e. funptrs.c
+// saves one byte if %1 is a register or @register
+replace {
+ mov a,%1
+ add a,acc
+} by {
+ ; Peephole 254 optimized left shift
+ mov a,%1
+ add a,%1
+} if notVolatile %1
+
+// applies to f.e. switch.c
+replace {
+ clr c
+ mov a,#%1
+ subb a,%2
+ jc %3
+%4:
+ mov a,%2
+ add a,%2
+ add a,%2
+ mov dptr,%5
+ jmp @a+dptr
+} by {
+ ; Peephole 255 optimized jump table index calculation
+ mov a,%2
+ cjne a,#(%1+0x01),.+1
+ jnc %3
+%4:
+ add a,%2
+ add a,%2
+ mov dptr,%5
+ jmp @a+dptr
+}
+
+// applies to f.e. jump tables and scott-bool1.c.
+// similar peepholes can be constructed for other instructions
+// after which a flag or a register is known (like: djnz, cjne, jnc)
+replace {
+ jc %1
+%2:
+ clr c
+} by {
+ ; Peephole 256.a removed redundant clr c
+ jc %1
+%2:
+} if labelRefCount %2 0
+
+// applies to f.e. logf.c
+replace {
+ jnz %1
+%2:
+ clr a
+} by {
+ ; Peephole 256.b removed redundant clr a
+ jnz %1
+%2:
+} if labelRefCount %2 0
+
+// applies to f.e. bug-905492.c
+replace {
+ jnz %1
+%2:
+ mov %3,#0x00
+} by {
+ ; Peephole 256.c loading %3 with zero from a
+ jnz %1
+%2:
+ mov %3,a
+} if labelRefCount %2 0
+
+// applies to f.e. malloc.c
+replace {
+ jnz %1
+%2:
+ mov %4,%5
+ mov %3,#0x00
+} by {
+ ; Peephole 256.d loading %3 with zero from a
+ jnz %1
+%2:
+ mov %4,%5
+ mov %3,a
+} if labelRefCount(%2 0),operandsNotRelated('a' %4)
+
+replace {
+ jnz %1
+%2:
+ mov %4,%5
+ mov %6,%7
+ mov %3,#0x00
+} by {
+ ; Peephole 256.e loading %3 with zero from a
+ jnz %1
+%2:
+ mov %4,%5
+ mov %6,%7
+ mov %3,a
+} if labelRefCount(%2 0),operandsNotRelated('a' %4 %6)
+
+replace {
+ jnz %1
+%2:
+ mov %4,%5
+ mov %6,%7
+ mov %8,%9
+ mov %3,#0x00
+} by {
+ ; Peephole 256.f loading %3 with zero from a
+ jnz %1
+%2:
+ mov %4,%5
+ mov %6,%7
+ mov %8,%9
+ mov %3,a
+} if labelRefCount(%2 0),operandsNotRelated('a' %4 %6 %8)
+
+
+// unsigned char i=8; do{ } while(--i != 0);
+// this currently only applies if i is kept in a register
+// notVolatile is required: dec + cjne accesses %1 twice, while djnz
+// accesses it once, so the rewrite would change the access count of a
+// volatile operand.
+replace {
+ dec %1
+ cjne %1,#0x00,%2
+} by {
+ ; Peephole 257 optimized decrement with compare
+ djnz %1,%2
+} if notVolatile %1
+
+
+// in_byte<<=1; if(in_bit) in_byte|=1;
+// helps f.e. reading data on a 3-wire (SPI) bus
+replace {
+ mov a,%1
+ add a,%1
+ mov %1,a
+ jnb %2,%3
+%4:
+ orl %1,#0x01
+%3:
+} by {
+ ; Peephole 258.a optimized bitbanging
+ mov a,%1
+ mov c,%2
+ addc a,%1
+ mov %1,a
+%4:
+%3:
+} if notVolatile %1
+
+// in_byte<<=1; if(in_bit) in_byte|=1;
+replace {
+ mov a,r%1
+ add a,r%1
+ mov r%1,a
+ jnb %2,%3
+%4:
+ orl ar%1,#0x01
+%3:
+} by {
+ ; Peephole 258.b optimized bitbanging
+ mov a,r%1
+ mov c,%2
+ addc a,r%1
+ mov r%1,a
+%4:
+%3:
+}
+
+// in_byte>>=1; if(in_bit) in_byte|=0x80;
+replace {
+ mov a,%1
+ clr c
+ rrc a
+ mov %1,a
+ jnb %2,%3
+%4:
+ orl %1,#0x80
+%3:
+} by {
+ ; Peephole 258.c optimized bitbanging
+ mov a,%1
+ mov c,%2
+ rrc a
+ mov %1,a
+%4:
+%3:
+} if notVolatile %1
+
+// in_byte>>=1; if(in_bit) in_byte|=0x80;
+replace {
+ mov a,r%1
+ clr c
+ rrc a
+ mov r%1,a
+ jnb %2,%3
+%4:
+ orl ar%1,#0x80
+%3:
+} by {
+ ; Peephole 258.d optimized bitbanging
+ mov a,r%1
+ mov c,%2
+ rrc a
+ mov r%1,a
+%4:
+%3:
+}
+
+// out_bit=out_byte&0x80; out_byte<<=1;
+// helps f.e. writing data on a 3-wire (SPI) bus
+replace {
+ mov a,%1
+ rlc a
+ mov %2,c
+ mov a,%1
+ add a,%1
+ mov %1,a
+} by {
+ ; Peephole 258.e optimized bitbanging
+ mov a,%1
+ add a,%1
+ mov %2,c
+ mov %1,a
+} if notVolatile %1
+
+// out_bit=out_byte&0x01; out_byte>>=1;
+replace {
+ mov a,%1
+ rrc a
+ mov %2,c
+ mov a,%1
+ clr c
+ rrc a
+ mov %1,a
+} by {
+ ; Peephole 258.f optimized bitbanging
+ mov a,%1
+ clr c
+ rrc a
+ mov %2,c
+ mov %1,a
+} if notVolatile %1
+
+// Peepholes 259.x are not compatible with peepholes 250.x
+// Peepholes 250.x add jumps to a previously unused label. As the
+// labelRefCount is not increased, peepholes 259.x are (mistakenly) applied.
+// (Mail on sdcc-devel 2004-10-25)
+// Note: Peepholes 193..199, 251 remove jumps to previously used labels without
+// decreasing labelRefCount (less dangerous - this f.e. leads to 253.c being
+// applied instead of 253.b)
+//
+// applies to f.e. vprintf.c
+//replace {
+// sjmp %1
+//%2:
+// ret
+//} by {
+// sjmp %1
+// ; Peephole 259.a removed redundant label %2 and ret
+// ;
+//} if labelRefCount %2 0
+
+// applies to f.e. gets.c
+//replace {
+// ljmp %1
+//%2:
+// ret
+//} by {
+// ljmp %1
+// ; Peephole 259.b removed redundant label %2 and ret
+// ;
+//} if labelRefCount %2 0
+
+// optimizing jumptables
+// Please note: to enable peephole 260.x you currently have to set
+// the environment variable SDCC_SJMP_JUMPTABLE
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+%3:
+} by {
+ ; Peephole 260.a used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+%3:
+} by {
+ ; Peephole 260.b used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+%3:
+} by {
+ ; Peephole 260.c used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+%3:
+} by {
+ ; Peephole 260.d used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+%3:
+} by {
+ ; Peephole 260.e used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+%3:
+} by {
+ ; Peephole 260.f used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+%3:
+} by {
+ ; Peephole 260.g used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+ ljmp %15
+%3:
+} by {
+ ; Peephole 260.h used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+ sjmp %15
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+ ljmp %15
+ ljmp %16
+%3:
+} by {
+ ; Peephole 260.i used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+ sjmp %15
+ sjmp %16
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+ ljmp %15
+ ljmp %16
+ ljmp %17
+%3:
+} by {
+ ; Peephole 260.j used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+ sjmp %15
+ sjmp %16
+ sjmp %17
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+ ljmp %15
+ ljmp %16
+ ljmp %17
+ ljmp %18
+%3:
+} by {
+ ; Peephole 260.k used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+ sjmp %15
+ sjmp %16
+ sjmp %17
+ sjmp %18
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+ ljmp %15
+ ljmp %16
+ ljmp %17
+ ljmp %18
+ ljmp %19
+%3:
+} by {
+ ; Peephole 260.l used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+ sjmp %15
+ sjmp %16
+ sjmp %17
+ sjmp %18
+ sjmp %19
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+ ljmp %15
+ ljmp %16
+ ljmp %17
+ ljmp %18
+ ljmp %19
+ ljmp %20
+%3:
+} by {
+ ; Peephole 260.m used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+ sjmp %15
+ sjmp %16
+ sjmp %17
+ sjmp %18
+ sjmp %19
+ sjmp %20
+%3:
+} if labelJTInRange
+
+// applies to: a = (a << 1) | (a >> 15);
+replace {
+ mov a,%1
+ rlc a
+ mov %1,a
+ mov a,%2
+ rlc a
+ mov %2,a
+ mov a,%1
+ mov acc.0,c
+ mov %1,a
+} by {
+ ; Peephole 261.a optimized left rol
+ mov a,%1
+ rlc a
+ xch a,%2
+ rlc a
+ xch a,%2
+ mov acc.0,c
+ mov %1,a
+}
+
+// applies to: a = (a << 15) | (a >> 1);
+replace {
+ mov a,%1
+ rrc a
+ mov %1,a
+ mov a,%2
+ rrc a
+ mov %2,a
+ mov a,%1
+ mov acc.7,c
+ mov %1,a
+} by {
+ ; Peephole 261.b optimized right rol
+ mov a,%1
+ rrc a
+ xch a,%2
+ rrc a
+ xch a,%2
+ mov acc.7,c
+ mov %1,a
+}