+ cjne @r%1,#%4,%3
+ inc r%1
+ cjne @r%1,#%5,%3
+ inc r%1
+ cjne @r%1,#%6,%3
+ mov a,#0x01
+ sjmp %7
+%3:
+ clr a
+%7:
+} by {
+ ; Peephole 241.d optimized compare
+ clr a
+ cjne @r%1,#%2,%3
+ inc r%1
+ cjne @r%1,#%4,%3
+ inc r%1
+ cjne @r%1,#%5,%3
+ inc r%1
+ cjne @r%1,#%6,%3
+ inc a
+%3:
+%7:
+}
+
+// applies to f.e. j = (k!=0x1000);
+// with volatile idata int k;
+// Replaces the mov-#0x01/sjmp/clr boolean materialization with clr a up
+// front and a single inc a on the all-bytes-equal path; labels %3 and %7
+// are kept in case other code references them.
+replace {
+ cjne @r%1,#%2,%3
+ inc r%1
+ cjne @r%1,#%4,%3
+ mov a,#0x01
+ sjmp %7
+%3:
+ clr a
+%7:
+} by {
+ ; Peephole 241.e optimized compare
+ clr a
+ cjne @r%1,#%2,%3
+ inc r%1
+ cjne @r%1,#%4,%3
+ inc a
+%3:
+%7:
+}
+
+// applies to f.e. vprintf.asm (--stack-auto)
+// single-byte variant of the same transformation
+replace {
+ cjne @r%1,#%2,%3
+ mov a,#0x01
+ sjmp %7
+%3:
+ clr a
+%7:
+} by {
+ ; Peephole 241.f optimized compare
+ clr a
+ cjne @r%1,#%2,%3
+ inc a
+%3:
+%7:
+}
+
+// applies to f.e. scott-bool1.c
+// %1 is referenced only by the jnz, so the label can be moved below the jz:
+// the jnz then skips the jz instead of branching to a conditional jump.
+replace {
+ jnz %1
+ mov %2,%3
+%1:
+ jz %4
+} by {
+ ; Peephole 242.a avoided branch jnz to jz
+ jnz %1
+ mov %2,%3
+ jz %4
+%1:
+} if labelRefCount %1 1
+
+// applies to f.e. scott-bool1.c
+// same as 242.a with one intervening orl
+replace {
+ jnz %1
+ mov %2,%3
+ orl a,%5
+%1:
+ jz %4
+} by {
+ ; Peephole 242.b avoided branch jnz to jz
+ jnz %1
+ mov %2,%3
+ orl a,%5
+ jz %4
+%1:
+} if labelRefCount %1 1
+
+// applies to f.e. logic.c
+// same as 242.a with a chain of three orl instructions
+replace {
+ jnz %1
+ mov %2,%3
+ orl a,%5
+ orl a,%6
+ orl a,%7
+%1:
+ jz %4
+} by {
+ ; Peephole 242.c avoided branch jnz to jz
+ jnz %1
+ mov %2,%3
+ orl a,%5
+ orl a,%6
+ orl a,%7
+ jz %4
+%1:
+} if labelRefCount %1 1
+
+// applies to f.e. vprintf.c
+// this is a rare case, usually the "tail increment" is noticed earlier
+// Retargets the cjne straight to %5; label %3 and its sjmp are kept in case
+// %3 has other references. labelInRange guards the cjne's relative offset.
+replace {
+ cjne %1,%2,%3
+ inc %4
+%3:
+ sjmp %5
+} by {
+ ; Peephole 243 avoided branch to sjmp
+ cjne %1,%2,%5
+ inc %4
+%3:
+ sjmp %5
+} if labelInRange
+
+// applies to f.e. simplefloat.c (saving 1 cycle)
+// load a directly from dpl, then copy to r%1: same end state in both
+// registers, one cycle cheaper than going through r%1
+replace {
+ mov r%1,dpl
+ mov a,r%1
+} by {
+ ; Peephole 244.a moving first to a instead of r%1
+ mov a,dpl
+ mov r%1,a
+}
+
+// applies to f.e. _itoa.c (saving 1 cycle)
+replace {
+ mov r%1,dph
+ mov a,r%1
+} by {
+ ; Peephole 244.b moving first to a instead of r%1
+ mov a,dph
+ mov r%1,a
+}
+
+
+// applies to f.e. bug-460010.c (saving 1 cycle)
+// a still holds the value just stored to r%1, so dpl can be loaded from a
+replace {
+ mov r%1,a
+ mov dpl,r%1
+} by {
+ ; Peephole 244.c loading dpl from a instead of r%1
+ mov r%1,a
+ mov dpl,a
+}
+
+replace {
+ mov r%1,a
+ mov dph,r%1
+} by {
+ ; Peephole 244.d loading dph from a instead of r%1
+ mov r%1,a
+ mov dph,a
+}
+
+// this one is safe but disables 245.a 245.b
+// please remove 245 if 245.a 245.b are found to be safe
+// applies to f.e. scott-compare.c
+// After clr a / rlc a, a holds the old carry (0 or 1). cjne a,#0x01 sets
+// carry iff a < 1, i.e. iff a was 0 -- so it complements the original carry.
+// The whole sequence therefore reduces to cpl c plus re-materializing c.
+replace {
+ clr a
+ rlc a
+ mov r%1,a
+ cjne a,#0x01,%2
+%2:
+ clr a
+ rlc a
+ mov r%1,a
+} by {
+ ; Peephole 245 optimized complement (r%1 and acc set needed?)
+ cpl c
+ clr a
+ rlc a
+ mov r%1,a
+} if labelRefCount %2 1
+
+// this one will not be triggered if 245 is present
+// please remove 245 if 245.a 245.b are found to be safe
+// applies to f.e. vprintf.c
+// jz on the complemented carry is equivalent to jc on the original carry
+replace {
+ clr a
+ rlc a
+ mov r%1,a
+ cjne a,#0x01,%2
+%2:
+ clr a
+ rlc a
+ mov r%1,a
+ jz %3
+} by {
+ ; Peephole 245.a optimized conditional jump (r%1 and acc not set!)
+ jc %3
+} if labelRefCount %2 1
+
+// this one will not be triggered if 245 is present
+// please remove 245 if 245.a 245.b are found to be safe
+// applies to f.e. scott-compare.c
+// jnz on the complemented carry is equivalent to jnc on the original carry
+replace {
+ clr a
+ rlc a
+ mov r%1,a
+ cjne a,#0x01,%2
+%2:
+ clr a
+ rlc a
+ mov r%1,a
+ jnz %3
+} by {
+ ; Peephole 245.b optimized conditional jump (r%1 and acc not set!)
+ jnc %3
+} if labelRefCount %2 1
+
+
+// rules 246.x apply to f.e. bitfields.c
+// Each rule merges two consecutive read-modify-write accesses to the same
+// xdata address into one access, dropping the redundant dptr reload and
+// folding constant masks where possible; notVolatile %1 forbids the merge
+// for volatile locations (the access count would change).
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%3
+ movx @dptr,a
+} by {
+ ; Peephole 246.a combined clr/clr
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2&%3
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%3
+ movx @dptr,a
+} by {
+ ; Peephole 246.b combined set/set
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2|%3
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%3
+ movx @dptr,a
+} by {
+ ; Peephole 246.c combined set/clr
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%3
+ movx @dptr,a
+} by {
+ ; Peephole 246.d combined clr/set
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%4
+ movx @dptr,a
+} by {
+ ; Peephole 246.e combined set/clr/clr
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3&%4
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%4
+ movx @dptr,a
+} by {
+ ; Peephole 246.f combined set/clr/set
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3
+ orl a,#%4
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%4
+ movx @dptr,a
+} by {
+ ; Peephole 246.g combined clr/set/clr
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3
+ anl a,#%4
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%4
+ movx @dptr,a
+} by {
+ ; Peephole 246.h combined clr/set/set
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3|%4
+ movx @dptr,a
+} if notVolatile %1
+
+
+
+
+// rules 247.x apply to f.e. bitfields.c
+// Indirect-idata counterparts of rules 246.x: merge two consecutive
+// read-modify-write accesses through the same pointer (mov r%5,#%1 /
+// mov a,@r%5 ...) into one, folding constant masks where possible;
+// guarded by notVolatile %1.
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%3
+ mov @r%5,a
+} by {
+ ; Peephole 247.a combined clr/clr
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2&%3
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%3
+ mov @r%5,a
+} by {
+ ; Peephole 247.b combined set/set
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2|%3
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%3
+ mov @r%5,a
+} by {
+ ; Peephole 247.c combined set/clr
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%3
+ mov @r%5,a
+} by {
+ ; Peephole 247.d combined clr/set
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%4
+ mov @r%5,a
+} by {
+ ; Peephole 247.e combined set/clr/clr
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3&%4
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%4
+ mov @r%5,a
+} by {
+ ; Peephole 247.f combined set/clr/set
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3
+ orl a,#%4
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%4
+ mov @r%5,a
+} by {
+ ; Peephole 247.g combined clr/set/clr
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3
+ anl a,#%4
+ mov @r%5,a
+} if notVolatile %1
+
+// note: the reread below must go through @r%5 (the reloaded pointer); it
+// previously read @r%4, which is the orl immediate operand, so the pattern
+// could never match and the clr/set/set case went unoptimized
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%4
+ mov @r%5,a
+} by {
+ ; Peephole 247.h combined clr/set/set
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3|%4
+ mov @r%5,a
+} if notVolatile %1
+
+
+// Peepholes 248.x have to be compatible with the keyword volatile.
+// They optimize typical accesses to memory mapped I/O devices:
+// volatile xdata char t; t|=0x01;
+// Only the redundant dptr reload between accesses is dropped; every
+// movx read and write is kept, so the number of accesses to the
+// (possibly volatile) location is unchanged and masks are NOT folded.
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ mov dptr,%1
+ mov a,%3
+ orl a,r%2
+ movx @dptr,a
+} by {
+ ; Peephole 248.a optimized or to xdata
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%3
+ movx @dptr,a
+}
+
+// volatile xdata char t; t&=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ mov dptr,%1
+ mov a,%3
+ anl a,r%2
+ movx @dptr,a
+} by {
+ ; Peephole 248.b optimized and to xdata
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%3
+ movx @dptr,a
+}
+
+// volatile xdata char t; t^=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ mov dptr,%1
+ mov a,%3
+ xrl a,r%2
+ movx @dptr,a
+} by {
+ ; Peephole 248.c optimized xor to xdata
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ xrl a,%3
+ movx @dptr,a
+}
+
+// volatile xdata char t; t|=0x01; t&=~0x01; t|=0x01;
+// the r%2 copy is kept only on the last access, where the original's
+// final value of r%2 is produced
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%4
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%5
+ movx @dptr,a
+} by {
+ ; Peephole 248.d optimized or/and/or to volatile xdata
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ orl a,%5
+ movx @dptr,a
+}
+
+// volatile xdata char t; t&=~0x01; t|=0x01; t&=~0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%4
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%5
+ movx @dptr,a
+} by {
+ ; Peephole 248.e optimized and/or/and to volatile xdata
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ anl a,%5
+ movx @dptr,a
+}
+
+// volatile xdata char t; t|=0x01; t&=~0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%4
+ movx @dptr,a
+} by {
+ ; Peephole 248.f optimized or/and to volatile xdata
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ anl a,%4
+ movx @dptr,a
+}
+
+// volatile xdata char t; t&=~0x01; t|=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%4
+ movx @dptr,a
+} by {
+ ; Peephole 248.g optimized and/or to volatile xdata
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ orl a,%4
+ movx @dptr,a
+}
+
+// volatile xdata char t; t^=0x01; t^=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ xrl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ xrl a,%4
+ movx @dptr,a
+} by {
+ ; Peephole 248.h optimized xor/xor to volatile xdata
+ mov dptr,%1
+ movx a,@dptr
+ xrl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ xrl a,%4
+ movx @dptr,a
+}
+
+// Drop a conditional jump whose target is the immediately following label,
+// when that jump is the label's only reference.
+replace {
+ jnz %1
+%1:
+} by {
+ ; Peephole 249.a jump optimization
+} if labelRefCount %1 1
+
+replace {
+ jz %1
+%1:
+} by {
+ ; Peephole 249.b jump optimization
+} if labelRefCount %1 1
+
+
+// This allows non-interrupt and interrupt code to safely compete
+// for a resource without the non-interrupt code having to disable
+// interrupts:
+// volatile bit resource_is_free;
+// if( resource_is_free ) {
+// resource_is_free=0; do_something; resource_is_free=1;
+// }
+// jbc tests and clears the bit in a single instruction, so an interrupt
+// cannot fire between the test and the clear.
+replace {
+ jnb %1,%2
+%3:
+ clr %1
+} by {
+ ; Peephole 250.a using atomic test and clear
+ jbc %1,%3
+ sjmp %2
+%3:
+} if labelRefCount %3 0
+
+replace {
+ jb %1,%2
+ ljmp %3
+%2:
+ clr %1
+} by {
+ ; Peephole 250.b using atomic test and clear
+ jbc %1,%2
+ ljmp %3
+%2:
+} if labelRefCount %2 1
+
+
+// not before peephole 250.b
+// labelIsReturnOnly: the jump target is followed only by a ret, so the
+// jump can become the ret itself
+replace {
+ ljmp %5
+} by {
+ ; Peephole 251.a replaced ljmp to ret with ret
+ ret
+} if labelIsReturnOnly
+
+// not before peephole 250.b
+replace {
+ sjmp %5
+} by {
+ ; Peephole 251.b replaced sjmp to ret with ret
+ ret
+} if labelIsReturnOnly
+
+// applies to shifts.c and when accessing arrays with an unsigned integer index
+// saving 1 byte, 2 cycles
+replace {
+ mov r%1,%2
+ mov a,(%2 + 1)
+ xch a,r%1
+ add a,acc
+ xch a,r%1
+ rlc a
+ mov r%3,a