+ mov r%1,acc
+} by {
+ ; Peephole 239 used a instead of acc
+ mov r%1,a
+}
+
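+// Peephole 240: mov a,%1 / addc a,#0x00 and clr a / addc a,%1 both compute
+// %1 plus the carry flag, but the one-byte clr a replaces the two-byte
+// addc a,#0x00, saving a byte. An illustrative trigger (assumed, not from
+// the regression suite): unsigned int i; unsigned char c; i += c;
+// where the high byte of i picks up the carry via addc a,#0x00.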
+replace restart {
+ mov a,%1
+ addc a,#0x00
+} by {
+ ; Peephole 240 used clr instead of addc a,#0x00
+ clr a
+ addc a,%1
+}
+
+// Peepholes 241.a to 241.d and 241.e to 241.h need to stay in this order:
+// the longer compare sequences must be matched first, otherwise a shorter
+// rule would fire on the tail of a longer sequence and block the full
+// optimization.
+replace {
+ cjne r%2,#%3,%0
+ cjne r%4,#%5,%0
+ cjne r%6,#%7,%0
+ cjne r%8,#%9,%0
+ mov a,#0x01
+ sjmp %1
+%0:
+ clr a
+%1:
+} by {
+ ; Peephole 241.a optimized compare
+ clr a
+ cjne r%2,#%3,%0
+ cjne r%4,#%5,%0
+ cjne r%6,#%7,%0
+ cjne r%8,#%9,%0
+ inc a
+%0:
+%1:
+} if labelRefCountChange(%1 -1)
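+// 241.a handles four-byte compares, f.e. (illustrative, assumed) a long
+// equality test j = (k == 0x12345678); which emits one cjne per byte.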
+
+// applies to generic pointer compare
+replace {
+ cjne r%2,#%3,%0
+ cjne r%4,#%5,%0
+ cjne r%6,#%7,%0
+ mov a,#0x01
+ sjmp %1
+%0:
+ clr a
+%1:
+} by {
+ ; Peephole 241.b optimized compare
+ clr a
+ cjne r%2,#%3,%0
+ cjne r%4,#%5,%0
+ cjne r%6,#%7,%0
+ inc a
+%0:
+%1:
+} if labelRefCountChange(%1 -1)
+
+// applies to f.e. time.c
+replace {
+ cjne r%2,#%3,%0
+ cjne r%4,#%5,%0
+ mov a,#0x01
+ sjmp %1
+%0:
+ clr a
+%1:
+} by {
+ ; Peephole 241.c optimized compare
+ clr a
+ cjne r%2,#%3,%0
+ cjne r%4,#%5,%0
+ inc a
+%0:
+%1:
+} if labelRefCountChange(%1 -1)
+
+// applies to f.e. malloc.c
+replace {
+ cjne r%2,#%3,%0
+ mov a,#0x01
+ sjmp %1
+%0:
+ clr a
+%1:
+} by {
+ ; Peephole 241.d optimized compare
+ clr a
+ cjne r%2,#%3,%0
+ inc a
+%0:
+%1:
+} if labelRefCountChange(%1 -1)
+
+// applies to f.e. j = (k!=0x1000);
+// with volatile idata long k;
+replace {
+ cjne @r%0,#%3,%1
+ inc r%0
+ cjne @r%0,#%4,%1
+ inc r%0
+ cjne @r%0,#%5,%1
+ inc r%0
+ cjne @r%0,#%6,%1
+ mov a,#0x01
+ sjmp %2
+%1:
+ clr a
+%2:
+} by {
+ ; Peephole 241.e optimized compare
+ clr a
+ cjne @r%0,#%3,%1
+ inc r%0
+ cjne @r%0,#%4,%1
+ inc r%0
+ cjne @r%0,#%5,%1
+ inc r%0
+ cjne @r%0,#%6,%1
+ inc a
+%1:
+%2:
+} if labelRefCountChange(%2 -1)
+
+// applies to f.e. j = (p!=NULL);
+// with volatile idata char *p;
+replace {
+ cjne @r%0,#%3,%1
+ inc r%0
+ cjne @r%0,#%4,%1
+ inc r%0
+ cjne @r%0,#%5,%1
+ mov a,#0x01
+ sjmp %2
+%1:
+ clr a
+%2:
+} by {
+ ; Peephole 241.f optimized compare
+ clr a
+ cjne @r%0,#%3,%1
+ inc r%0
+ cjne @r%0,#%4,%1
+ inc r%0
+ cjne @r%0,#%5,%1
+ inc a
+%1:
+%2:
+} if labelRefCountChange(%2 -1)
+
+// applies to f.e. j = (k!=0x1000);
+// with volatile idata int k;
+replace {
+ cjne @r%0,#%3,%1
+ inc r%0
+ cjne @r%0,#%4,%1
+ mov a,#0x01
+ sjmp %2
+%1:
+ clr a
+%2:
+} by {
+ ; Peephole 241.g optimized compare
+ clr a
+ cjne @r%0,#%3,%1
+ inc r%0
+ cjne @r%0,#%4,%1
+ inc a
+%1:
+%2:
+} if labelRefCountChange(%2 -1)
+
+// applies to f.e. vprintf.asm (--stack-auto)
+replace {
+ cjne @r%0,#%3,%1
+ mov a,#0x01
+ sjmp %2
+%1:
+ clr a
+%2:
+} by {
+ ; Peephole 241.h optimized compare
+ clr a
+ cjne @r%0,#%3,%1
+ inc a
+%1:
+%2:
+} if labelRefCountChange(%2 -1)
+
+// applies to f.e. scott-bool1.c
+replace {
+ jnz %1
+ mov %2,%3
+%1:
+ jz %4
+} by {
+ jnz %1
+ mov %2,%3
+ ; Peephole 242.a avoided branch jnz to jz
+ jz %4
+%1:
+} if labelRefCount(%1 1)
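+// In the jnz-taken path a is known to be nonzero, so the jz at %1 can
+// never branch; moving %1 below the jz lets that path skip the test.
+// 242.b and 242.c extend this across intervening orl instructions.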
+
+// applies to f.e. scott-bool1.c
+replace {
+ jnz %1
+ mov %2,%3
+ orl a,%5
+%1:
+ jz %4
+} by {
+ jnz %1
+ mov %2,%3
+ orl a,%5
+ ; Peephole 242.b avoided branch jnz to jz
+ jz %4
+%1:
+} if labelRefCount(%1 1)
+
+// applies to f.e. logic.c
+replace {
+ jnz %1
+ mov %2,%3
+ orl a,%5
+ orl a,%6
+ orl a,%7
+%1:
+ jz %4
+} by {
+ jnz %1
+ mov %2,%3
+ orl a,%5
+ orl a,%6
+ orl a,%7
+ ; Peephole 242.c avoided branch jnz to jz
+ jz %4
+%1:
+} if labelRefCount(%1 1)
+
+// applies to f.e. vprintf.c
+// this is a rare case; usually the "tail increment" is noticed earlier
+replace {
+ cjne %1,%2,%3
+ inc %4
+%3:
+ sjmp %5
+} by {
+ ; Peephole 243 avoided branch to sjmp
+ cjne %1,%2,%5
+ inc %4
+%3:
+ sjmp %5
+} if labelInRange(), labelRefCountChange(%3 -1), labelRefCountChange(%5 1)
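+// 243 retargets the cjne straight to %5 instead of bouncing through the
+// sjmp at %3; labelInRange() checks that %5 is reachable by cjne's
+// relative offset, and the reference counts of %3 and %5 are updated.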
+
+// applies to f.e. simplefloat.c (saving 1 cycle)
+replace {
+ mov r%1,dpl
+ mov a,r%1
+} by {
+ ; Peephole 244.a moving first to a instead of r%1
+ mov a,dpl
+ mov r%1,a
+}
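+// Cycle accounting for 244.a/244.b on a standard 8051 (assumed):
+// mov r%1,dpl (2 cycles) + mov a,r%1 (1 cycle) = 3 cycles, while
+// mov a,dpl (1 cycle) + mov r%1,a (1 cycle) = 2 cycles at the same size.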
+
+// applies to f.e. _itoa.c (saving 1 cycle)
+replace {
+ mov r%1,dph
+ mov a,r%1
+} by {
+ ; Peephole 244.b moving first to a instead of r%1
+ mov a,dph
+ mov r%1,a
+}
+
+
+// applies to f.e. bug-460010.c (saving 1 cycle)
+replace {
+ mov r%1,a
+ mov dpl,r%1
+} by {
+ mov r%1,a
+ ; Peephole 244.c loading dpl from a instead of r%1
+ mov dpl,a
+}
+
+replace {
+ mov r%1,a
+ mov dph,r%1
+} by {
+ mov r%1,a
+ ; Peephole 244.d loading dph from a instead of r%1
+ mov dph,a
+}
+
+// this one is safe but disables 245.a and 245.b
+// please remove 245 if 245.a and 245.b are found to be safe
+// applies to f.e. scott-compare.c
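+// What the matched block computes: clr a / rlc a copies the carry flag
+// into a, and cjne a,#0x01,%2 sets carry to (a < 1), i.e. to the
+// complement of the original flag; so the sequence reduces to cpl c
+// followed by a single flag-to-accumulator move.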
+replace {
+ clr a
+ rlc a
+ mov r%1,a
+ cjne a,#0x01,%2
+%2:
+ clr a
+ rlc a
+ mov r%1,a
+} by {
+ ; Peephole 245 optimized complement (r%1 and acc set needed?)
+ cpl c
+ clr a
+ rlc a
+ mov r%1,a
+} if labelRefCount(%2 1), labelRefCountChange(%2 -1)
+
+// this one will not be triggered if 245 is present
+// please remove 245 if 245.a and 245.b are found to be safe
+// applies to f.e. vprintf.c
+replace {
+ clr a
+ rlc a
+ mov r%1,a
+ cjne a,#0x01,%2
+%2:
+ clr a
+ rlc a
+ mov r%1,a
+ jz %3
+} by {
+ ; Peephole 245.a optimized conditional jump (r%1 and acc not set!)
+ jc %3
+} if labelRefCount(%2 1), labelRefCountChange(%2 -1)
+
+// this one will not be triggered if 245 is present
+// please remove 245 if 245.a and 245.b are found to be safe
+// applies to f.e. scott-compare.c
+replace {
+ clr a
+ rlc a
+ mov r%1,a
+ cjne a,#0x01,%2
+%2:
+ clr a
+ rlc a
+ mov r%1,a
+ jnz %3
+} by {
+ ; Peephole 245.b optimized conditional jump (r%1 and acc not set!)
+ jnc %3
+} if labelRefCount(%2 1), labelRefCountChange(%2 -1)
+
+
+// rules 246.x apply to f.e. bitfields.c
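+// Illustrative trigger (assumed): xdata struct { unsigned b0:1;
+// unsigned b1:1; } x; x.b0 = 0; x.b1 = 0; yields two read-modify-write
+// sequences on the same address whose anl/orl masks can be merged.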
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%3
+ movx @dptr,a
+} by {
+ mov dptr,#%1
+ movx a,@dptr
+ ; Peephole 246.a combined clr/clr
+ anl a,#%2&%3
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%3
+ movx @dptr,a
+} by {
+ mov dptr,#%1
+ movx a,@dptr
+ ; Peephole 246.b combined set/set
+ orl a,#%2|%3
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%3
+ movx @dptr,a
+} by {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ ; Peephole 246.c combined set/clr
+ anl a,#%3
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%3
+ movx @dptr,a
+} by {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ ; Peephole 246.d combined clr/set
+ orl a,#%3
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%4
+ movx @dptr,a
+} by {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ ; Peephole 246.e combined set/clr/clr
+ anl a,#%3&%4
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%4
+ movx @dptr,a
+} by {
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%2
+ anl a,#%3
+ ; Peephole 246.f combined set/clr/set
+ orl a,#%4
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%4
+ movx @dptr,a
+} by {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3
+ ; Peephole 246.g combined clr/set/clr
+ anl a,#%4
+ movx @dptr,a
+} if notVolatile %1
+
+replace {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ orl a,#%3
+ movx @dptr,a
+ mov dptr,#%1
+ movx a,@dptr
+ orl a,#%4
+ movx @dptr,a
+} by {
+ mov dptr,#%1
+ movx a,@dptr
+ anl a,#%2
+ ; Peephole 246.h combined clr/set/set
+ orl a,#%3|%4
+ movx @dptr,a
+} if notVolatile %1
+
+
+// rules 247.x apply to f.e. bitfields.c
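+// 247.a to 247.h mirror 246.a to 246.h for accesses through a pointer
+// register (@r%5) instead of dptr, f.e. a bitfield struct in idata.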
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%3
+ mov @r%5,a
+} by {
+ mov r%5,#%1
+ mov a,@r%5
+ ; Peephole 247.a combined clr/clr
+ anl a,#%2&%3
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%3
+ mov @r%5,a
+} by {
+ mov r%5,#%1
+ mov a,@r%5
+ ; Peephole 247.b combined set/set
+ orl a,#%2|%3
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%3
+ mov @r%5,a
+} by {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ ; Peephole 247.c combined set/clr
+ anl a,#%3
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%3
+ mov @r%5,a
+} by {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ ; Peephole 247.d combined clr/set
+ orl a,#%3
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%4
+ mov @r%5,a
+} by {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ ; Peephole 247.e combined set/clr/clr
+ anl a,#%3&%4
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%4
+ mov @r%5,a
+} by {
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%2
+ anl a,#%3
+ ; Peephole 247.f combined set/clr/set
+ orl a,#%4
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%4
+ mov @r%5,a
+} by {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3
+ ; Peephole 247.g combined clr/set/clr
+ anl a,#%4
+ mov @r%5,a
+} if notVolatile %1
+
+replace {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ orl a,#%3
+ mov @r%5,a
+ mov r%5,#%1
+ mov a,@r%5
+ orl a,#%4
+ mov @r%5,a
+} by {
+ mov r%5,#%1
+ mov a,@r%5
+ anl a,#%2
+ ; Peephole 247.h combined clr/set/set
+ orl a,#%3|%4
+ mov @r%5,a
+} if notVolatile %1
+
+
+// Peepholes 248.x have to be compatible with the keyword volatile.
+// They optimize typical accesses to memory mapped I/O devices:
+// volatile xdata char t; t|=0x01;
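+// The rewrites below remain volatile-safe because each source-level
+// access keeps exactly one movx read and one movx write: a is re-read
+// with movx a,@dptr between operations instead of being cached in r%2.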
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ mov dptr,%1
+ mov a,%3
+ orl a,r%2
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ ; Peephole 248.a optimized or to xdata
+ orl a,%3
+ movx @dptr,a
+}
+
+// volatile xdata char t; t&=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ mov dptr,%1
+ mov a,%3
+ anl a,r%2
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ ; Peephole 248.b optimized and to xdata
+ anl a,%3
+ movx @dptr,a
+}
+
+// volatile xdata char t; t^=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ mov dptr,%1
+ mov a,%3
+ xrl a,r%2
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ ; Peephole 248.c optimized xor to xdata
+ xrl a,%3
+ movx @dptr,a
+}
+
+// volatile xdata char t; t|=0x01; t&=~0x01; t|=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%4
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%5
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ ; Peephole 248.d optimized or/and/or to volatile xdata
+ orl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ orl a,%5
+ movx @dptr,a
+}
+
+// volatile xdata char t; t&=~0x01; t|=0x01; t&=~0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%4
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%5
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ ; Peephole 248.e optimized and/or/and to volatile xdata
+ anl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ anl a,%5
+ movx @dptr,a
+}
+
+// volatile xdata char t; t|=0x01; t&=~0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%4
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ ; Peephole 248.f optimized or/and to volatile xdata
+ orl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ anl a,%4
+ movx @dptr,a
+}
+
+// volatile xdata char t; t&=~0x01; t|=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ anl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ orl a,%4
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ ; Peephole 248.g optimized and/or to volatile xdata
+ anl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ orl a,%4
+ movx @dptr,a
+}
+
+// volatile xdata char t; t^=0x01; t^=0x01;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ xrl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ mov r%2,a
+ xrl a,%4
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ ; Peephole 248.h optimized xor/xor to volatile xdata
+ xrl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ mov r%2,a
+ xrl a,%4
+ movx @dptr,a
+}
+
+// Peepholes 248.i to 248.m are like 248.d to 248.h except they apply to bitfields:
+// xdata struct { unsigned b0:1; unsigned b1:1; unsigned b2:1; } xport;
+// xport.b0=1; xport.b0=0; xport.b0=1;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%5
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+ ; Peephole 248.i optimized or/and/or to xdata bitfield
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+ movx a,@dptr
+ orl a,%5
+ movx @dptr,a
+}
+
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%5
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+ ; Peephole 248.j optimized and/or/and to xdata bitfield
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+ movx a,@dptr
+ anl a,%5
+ movx @dptr,a
+}
+
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+ ; Peephole 248.k optimized or/and to xdata bitfield
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+}
+
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+ ; Peephole 248.l optimized and/or to xdata bitfield
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+}
+
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ xrl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ xrl a,%4
+ movx @dptr,a
+} by {
+ mov dptr,%1
+ movx a,@dptr
+ xrl a,%3
+ movx @dptr,a
+ ; Peephole 248.m optimized xor/xor to xdata bitfield
+ movx a,@dptr
+ xrl a,%4
+ movx @dptr,a
+}
+
+
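+// 249.x: a conditional jump to the immediately following label is a
+// no-op; it can be removed together with the label when the jump is the
+// label's only reference.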
+replace {
+ jnz %1
+%1:
+} by {
+ ; Peephole 249.a jump optimization
+} if labelRefCount(%1 1), labelRefCountChange(%1 -1)
+
+replace {
+ jz %1
+%1:
+} by {
+ ; Peephole 249.b jump optimization
+} if labelRefCount(%1 1), labelRefCountChange(%1 -1)
+
+
+// This allows non-interrupt and interrupt code to safely compete
+// for a resource without the non-interrupt code having to disable
+// interrupts:
+// volatile bit resource_is_free;
+// if( resource_is_free ) {
+// resource_is_free=0; do_something; resource_is_free=1;
+// }
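+// jbc %1,%3 tests and clears the bit in one indivisible instruction, so
+// no interrupt can strike between the test and the clr; because 250.a
+// inverts the branch sense it needs the extra sjmp %2 and creates a new
+// reference to %3.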
+replace {
+ jnb %1,%2
+%3:
+ clr %1
+} by {
+ ; Peephole 250.a using atomic test and clear
+ jbc %1,%3
+ sjmp %2
+%3:
+} if labelRefCount(%3 0), labelRefCountChange(%3 1)
+
+replace {
+ jb %1,%2
+ ljmp %3
+%2:
+ clr %1
+} by {
+ ; Peephole 250.b using atomic test and clear
+ jbc %1,%2
+ ljmp %3
+%2:
+} if labelRefCount(%2 1)
+
+
+// not before peephole 250.b: its pattern still contains the ljmp that
+// this rule would otherwise rewrite first
+replace {
+ ljmp %5
+} by {
+ ; Peephole 251.a replaced ljmp to ret with ret
+ ret
+} if labelIsReturnOnly(), labelRefCountChange(%5 -1)
+
+// not before peephole 250.b
+replace {
+ sjmp %5
+} by {
+ ; Peephole 251.b replaced sjmp to ret with ret
+ ret
+} if labelIsReturnOnly(), labelRefCountChange(%5 -1)
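+// labelIsReturnOnly() admits the rewrite only when the code at the jump
+// target consists of nothing but a return, so replacing the jump with
+// ret cannot change behavior.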
+
+// applies to shifts.c and when accessing arrays with an unsigned integer index
+// saving 1 byte, 2 cycles
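+// add a,acc doubles a (a one-bit left shift); the matched code shifts
+// the 16-bit value in %2/(%2 + 1) left by one into r%1/r%3, and the
+// rewrite gets the same result without the two xch a,r%1 exchanges.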
+replace {
+ mov r%1,%2
+ mov a,(%2 + 1)
+ xch a,r%1
+ add a,acc
+ xch a,r%1
+ rlc a
+ mov r%3,a
+} by {
+ ; Peephole 252 optimized left shift
+ mov a,%2
+ add a,acc
+ mov r%1,a
+ mov a,(%2 + 1)
+ rlc a
+ mov r%3,a
+}
+
+// applies to f.e. funptrs.c
+// saves one byte if %1 is a register or @register
+replace {
+ mov a,%1
+ add a,acc