mov a,%3
}
+replace {
+ clr a
+ mov %1,a
+ mov %2,a
+ clr a
+} by {
+ clr a
+ mov %1,a
+ mov %2,a
+ ; Peephole 3.d removed redundant clr
+}
+
+replace {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,a
+ clr a
+} by {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,a
+ ; Peephole 3.e removed redundant clr
+}
+
+replace {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,a
+ mov %4,a
+ clr a
+} by {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,a
+ mov %4,a
+ ; Peephole 3.f removed redundant clr
+}
+
+replace {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,a
+ mov %4,a
+ mov %5,a
+ clr a
+} by {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,a
+ mov %4,a
+ mov %5,a
+ ; Peephole 3.g removed redundant clr
+}
+
+replace {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,#0x00
+} by {
+ clr a
+ mov %1,a
+ mov %2,a
+ ; Peephole 3.h changed mov %3,#0x00 to %3,a
+ mov %3,a
+}
+
+replace {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,a
+ mov %4,#0x00
+} by {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,a
+ ; Peephole 3.i changed mov %4,#0x00 to %4,a
+ mov %4,a
+}
+
+replace {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,a
+ mov %4,a
+ mov %5,#0x00
+} by {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,a
+ mov %4,a
+ ; Peephole 3.j changed mov %5,#0x00 to %5,a
+ mov %5,a
+}
+
+replace {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,a
+ mov %4,a
+ mov %5,a
+ mov %6,#0x00
+} by {
+ clr a
+ mov %1,a
+ mov %2,a
+ mov %3,a
+ mov %4,a
+ mov %5,a
+ ; Peephole 3.k changed mov %6,#0x00 to %6,a
+ mov %6,a
+}
+
replace {
mov %1,a
mov dptr,#%2
%1:
} if labelInRange
-
replace {
clr a
cjne %1,%2,%3
%3:
jnz %4
} by {
- ; Peephole 115 jump optimization
+ ; Peephole 115.a jump optimization
cjne %1,%2,%3
sjmp %4
%3:
} if labelRefCount %3 1
+replace {
+ mov %1,a
+ cjne %1,#0x00,%2
+ sjmp %3
+%2:
+} by {
+ ; Peephole 115.b jump optimization
+ mov %1,a
+ jz %3
+%2:
+}
+
replace {
clr a
cjne %1,%2,%3
} by {
; Peephole 123 jump optimization
cjne %1,%2,%3
- smp %4
+ sjmp %4
%3:
} if labelRefCount %3 1
jnc %1
}
+// applies to: bug-524691.c --model-large: while (uRight - uLeft > 1)
replace {
clr a
rlc a
- jnz %1
+ jnz %0
} by {
- ; Peephole 129 jump optimization
- jc %1
+ ; Peephole 129.a jump optimization
+ jc %0
}
-replace {
+// applies to: _fsdiv.c --xstack: if (mant1 < mant2)
+replace {
+ clr a
+ rlc a
+ pop %1
+ jnz %0
+} by {
+ ; Peephole 129.b optimized condition
+ pop %1
+ jc %0
+} if notVolatile %1
+
+// applies to: time.c --xstack: while((days += (LEAP_YEAR(year) ? 366 : 365)) <= epoch)
+replace {
+ clr a
+ rlc a
+ pop %1
+ pop %2
+ jnz %0
+} by {
+ ; Peephole 129.c optimized condition
+ pop %1
+ pop %2
+ jc %0
+} if notVolatile %1 %2
+
+// applies to: _memmove.c --xstack: if (((int)src < (int)dst) && ((((int)src)+acount) > (int)dst))
+replace {
+ clr a
+ rlc a
+ pop %1
+ pop %2
+ pop %3
+ jnz %0
+} by {
+ ; Peephole 129.d optimized condition
+ pop %1
+ pop %2
+ pop %3
+ jc %0
+} if notVolatile %1 %2 %3
+
+replace {
mov r%1,@r%2
} by {
; Peephole 130 changed target address mode r%1 to ar%1
mov ar%1,@r%2
}
-replace {
+replace {
mov a,%1
subb a,#0x01
mov %2,a
mov %1,%2
} by {
; Peephole 131 optimized decrement (not caring for c)
- dec %1
- mov %2,%1
+ dec %1
+ mov %2,%1
+}
+
+// ideally the optimizations of rules 132.x should be done in genCmpXX
+replace {
+ clr c
+ mov a,#%1
+ subb a,%2
+ mov %3,c
+} by {
+ ; Peephole 132.a optimized genCmpGt by inverse logic (acc differs)
+ mov a,%2
+ add a,#0xff - %1
+ mov %3,c
+}
+
+replace {
+ clr c
+ mov a,#%1
+ subb a,%2
+ jnc %5
+} by {
+ ; Peephole 132.b optimized genCmpGt by inverse logic (acc differs)
+ mov a,%2
+ add a,#0xff - %1
+ jnc %5
+}
+
+replace {
+ clr c
+ mov a,#%1
+ subb a,%2
+ jc %5
+} by {
+ ; Peephole 132.c optimized genCmpGt by inverse logic (acc differs)
+ mov a,%2
+ add a,#0xff - %1
+ jc %5
}
+replace {
+ clr c
+ mov a,%1
+ subb a,#%2
+ mov %3,c
+} by {
+ ; Peephole 132.d optimized genCmpGt by inverse logic
+ mov a,#0x100 - %2
+ add a,%1
+ mov %3,c
+} if operandsNotRelated('0x00' %2)
+
+replace {
+ clr c
+ mov a,%1
+ subb a,#%2
+ jnc %5
+} by {
+ ; Peephole 132.e optimized genCmpLt by inverse logic (carry differs)
+ mov a,#0x100 - %2
+ add a,%1
+ jc %5
+} if operandsNotRelated('0x00' %2)
+
+replace {
+ clr c
+ mov a,%1
+ subb a,#%2
+ jc %5
+} by {
+ ; Peephole 132.f optimized genCmpLt by inverse logic (carry differs)
+ mov a,#0x100 - %2
+ add a,%1
+ jnc %5
+} if operandsNotRelated('0x00' %2)
+
+
replace {
mov r%1,%2
mov ar%3,@r%1
xrl %1,#0x80
} by {
; Peephole 159 avoided xrl during execution
- mov %1,#(%2 ^ 0x80)
+ mov %1,#(%2 ^ 0x80)
}
replace {
// applies to f.e. bug-408972.c
// not before peephole 177.c
-replace {
+replace restart {
mov %1,%2
mov %3,%4
mov %2,%1
; Peephole 177.d removed redundant move
mov %1,%2
mov %3,%4
-} if notVolatile %1 %2
+} if notVolatile(%1 %2),operandsNotRelated(%1 %3)
+
+// applies to f.e. bug-607243.c
+// also check notVolatile %3, as it will return FALSE if it's @r%1
+replace {
+ mov r%1,%2
+ mov ar%1,%3
+} by {
+ ; Peephole 177.e removed redundant move
+ mov ar%1,%3
+} if notVolatile %2 %3
+
+replace {
+ mov ar%1,%2
+ mov r%1,%3
+} by {
+ ; Peephole 177.f removed redundant move
+ mov r%1,%3
+} if notVolatile %2
+
+replace {
+ mov %1,%2
+ mov a,%1
+} by {
+ ; Peephole 177.g optimized mov sequence
+ mov a,%2
+ mov %1,a
+} if notVolatile %1
+
+replace {
+ mov %1,%2
+ mov a,%2
+} by {
+ ; Peephole 177.h optimized mov sequence
+ mov a,%2
+ mov %1,a
+} if notVolatile %2
replace {
mov a,%1
mov dptr,#(((%2)<<8) + %1)
}
+// applies to return 0.0; in f.e. sincosf.c
+replace {
+ mov dpl,#%1
+ clr a
+ mov dph,a
+} by {
+ ; Peephole 182.d used 16 bit load of dptr
+ mov dptr,#(%1&0x00ff)
+ clr a
+}
+
replace {
anl %1,#%2
anl %1,#%3
movc a,@a+dptr
mov %2,a
mov acc,b
- inc dptr
+ inc dptr
movc a,@a+dptr
mov %3,a
mov acc,b
inc dptr
}
-// char indexed access to: char code table[] = {4,3,2,1};
+// char indexed access to: char code table[] = {4,3,2,1};
replace {
add a,#%1
mov dpl,a
movc a,@a+dptr
}
-// char indexed access to: int code table[] = {4,3,2,1};
+// char indexed access to: int code table[] = {4,3,2,1};
replace {
mov b,#0x02
mul ab
cjne %5,%6,%3
sjmp %7
%3:
- sjmp %8
+ sjmp %8
} by {
; Peephole 197.a optimized misc jump sequence
jnz %8
mov a,%4
cjne %5,%6,%8
sjmp %7
-;%3:
+;%3:
} if labelRefCount %3 2
replace {
push %1
pop %1
} by {
- ; Peephole 211 removed redundant push %1 pop %1
-}
+ ; Peephole 211 removed redundant push %1 pop %1
+}
replace {
mov a,_bp
replace {
mov %1,#(( %2 + %3 >> 8 ) ^ 0x80)
} by {
- ; Peephole 213.b inserted fix
+ ; Peephole 213.b inserted fix
mov %1,#((%2 + %3) >> 8)
xrl %1,#0x80
}
replace {
clr a
- movx @dptr,a
- inc dptr
- movx @dptr,a
- inc dptr
+ movx @%1,a
+ inc %1
+ movx @%1,a
+ inc %1
clr a
} by {
; Peephole 226 removed unnecessary clr
clr a
- movx @dptr,a
- inc dptr
- movx @dptr,a
- inc dptr
+ movx @%1,a
+ inc %1
+ movx @%1,a
+ inc %1
}
replace {
mov %2,%10
mov %3,%11
mov %4,%12
-
+
mov %5,%13
mov %6,%14
mov %7,%15
mov %2,%10
mov %3,%11
mov %4,%12
-
+
mov %5,%13
mov %6,%14
mov %7,%15
mov %2,%6
mov %3,%7
mov %4,%8
-
+
mov %5,%1
mov %6,%2
mov %7,%3
replace {
mov %1,%5
mov %2,%6
-
+
mov %3,%7
mov %4,%8
} by {
mov %1,%5
mov %2,%6
-
+
mov %3,%7
mov %4,%8
; Peephole 238.c removed 2 redundant moves
mov %2,%5
mov %3,%6
; Peephole 238.d removed 3 redundant moves
-} if operandsNotSame6 %1 %2 %3 %4 %5 %6
+} if operandsNotSame6 %1 %2 %3 %4 %5 %6
// applies to f.e. ser_ir.asm
replace {
cjne r%6,#%7,%3
cjne r%8,#%9,%3
mov a,#0x01
- sjmp %10
-%3:
+ sjmp %10
+%3:
clr a
-%10:
+%10:
} by {
; Peephole 241.a optimized compare
clr a
inc a
%3:
%10:
-}
+}
// applies to f.e. time.c
replace {
cjne r%1,#%2,%3
cjne r%4,#%5,%3
mov a,#0x01
- sjmp %6
-%3:
+ sjmp %6
+%3:
clr a
-%6:
+%6:
} by {
; Peephole 241.b optimized compare
clr a
inc a
%3:
%6:
-}
+}
// applies to f.e. malloc.c
replace {
sjmp %4
%3:
clr a
-%4:
+%4:
} by {
; Peephole 241.c optimized compare
clr a
inc a
%3:
%4:
-}
+}
-// applies to f.e. j = (k!=0x1000);
+// applies to f.e. j = (k!=0x1000);
// with volatile idata long k;
replace {
cjne @r%1,#%2,%3
- inc r%1
+ inc r%1
cjne @r%1,#%4,%3
- inc r%1
+ inc r%1
cjne @r%1,#%5,%3
- inc r%1
+ inc r%1
cjne @r%1,#%6,%3
mov a,#0x01
- sjmp %7
-%3:
+ sjmp %7
+%3:
clr a
%7:
} by {
inc a
%3:
%7:
-}
+}
// applies to f.e. j = (k!=0x1000);
// with volatile idata int k;
replace {
cjne @r%1,#%2,%3
- inc r%1
+ inc r%1
cjne @r%1,#%4,%3
mov a,#0x01
- sjmp %7
-%3:
+ sjmp %7
+%3:
clr a
-%7:
+%7:
} by {
; Peephole 241.e optimized compare
clr a
inc a
%3:
%7:
-}
+}
// applies to f.e. vprintf.asm (--stack-auto)
replace {
sjmp %7
%3:
clr a
-%7:
+%7:
} by {
; Peephole 241.f optimized compare
clr a
inc a
%3:
%7:
-}
+}
// applies to f.e. scott-bool1.c
replace {
cjne %1,%2,%3
inc %4
%3:
- sjmp %5
+ sjmp %5
} by {
; Peephole 243 avoided branch to sjmp
cjne %1,%2,%5
inc %4
%3:
- sjmp %5
+ sjmp %5
} if labelInRange
// applies to f.e. simplefloat.c (saving 1 cycle)
rlc a
mov r%1,a
cjne a,#0x01,%2
-%2:
+%2:
clr a
rlc a
mov r%1,a
cpl c
clr a
rlc a
- mov r%1,a
+ mov r%1,a
} if labelRefCount %2 1
// this one will not be triggered if 245 is present
rlc a
mov r%1,a
cjne a,#0x01,%2
-%2:
+%2:
clr a
rlc a
mov r%1,a
rlc a
mov r%1,a
cjne a,#0x01,%2
-%2:
+%2:
clr a
rlc a
mov r%1,a
anl a,#%3
movx @dptr,a
} by {
- ; Peephole 246.a combined clr/clr
+ ; Peephole 246.a combined clr/clr
mov dptr,#%1
movx a,@dptr
anl a,#%2&%3
orl a,#%3
movx @dptr,a
} by {
- ; Peephole 246.b combined set/set
+ ; Peephole 246.b combined set/set
mov dptr,#%1
movx a,@dptr
orl a,#%2|%3
anl a,#%3
movx @dptr,a
} by {
- ; Peephole 246.c combined set/clr
+ ; Peephole 246.c combined set/clr
mov dptr,#%1
movx a,@dptr
orl a,#%2
orl a,#%3
movx @dptr,a
} by {
- ; Peephole 246.d combined clr/set
+ ; Peephole 246.d combined clr/set
mov dptr,#%1
movx a,@dptr
anl a,#%2
anl a,#%4
movx @dptr,a
} by {
- ; Peephole 246.e combined set/clr/clr
+ ; Peephole 246.e combined set/clr/clr
mov dptr,#%1
movx a,@dptr
orl a,#%2
orl a,#%4
movx @dptr,a
} by {
- ; Peephole 246.f combined set/clr/set
+ ; Peephole 246.f combined set/clr/set
mov dptr,#%1
movx a,@dptr
orl a,#%2
anl a,#%4
movx @dptr,a
} by {
- ; Peephole 246.g combined clr/set/clr
+ ; Peephole 246.g combined clr/set/clr
mov dptr,#%1
movx a,@dptr
anl a,#%2
orl a,#%4
movx @dptr,a
} by {
- ; Peephole 246.h combined clr/set/set
+ ; Peephole 246.h combined clr/set/set
mov dptr,#%1
movx a,@dptr
anl a,#%2
anl a,#%3
mov @r%5,a
} by {
- ; Peephole 247.a combined clr/clr
+ ; Peephole 247.a combined clr/clr
mov r%5,#%1
mov a,@r%5
anl a,#%2&%3
orl a,#%3
mov @r%5,a
} by {
- ; Peephole 247.b combined set/set
+ ; Peephole 247.b combined set/set
mov r%5,#%1
mov a,@r%5
orl a,#%2|%3
anl a,#%3
mov @r%5,a
} by {
- ; Peephole 247.c combined set/clr
+ ; Peephole 247.c combined set/clr
mov r%5,#%1
mov a,@r%5
orl a,#%2
orl a,#%3
mov @r%5,a
} by {
- ; Peephole 247.d combined clr/set
+ ; Peephole 247.d combined clr/set
mov r%5,#%1
mov a,@r%5
anl a,#%2
anl a,#%4
mov @r%5,a
} by {
- ; Peephole 247.e combined set/clr/clr
+ ; Peephole 247.e combined set/clr/clr
mov r%5,#%1
mov a,@r%5
orl a,#%2
orl a,#%4
mov @r%5,a
} by {
- ; Peephole 247.f combined set/clr/set
+ ; Peephole 247.f combined set/clr/set
mov r%5,#%1
mov a,@r%5
orl a,#%2
anl a,#%4
mov @r%5,a
} by {
- ; Peephole 247.g combined clr/set/clr
+ ; Peephole 247.g combined clr/set/clr
mov r%5,#%1
mov a,@r%5
anl a,#%2
orl a,#%4
mov @r%5,a
} by {
- ; Peephole 247.h combined clr/set/set
+ ; Peephole 247.h combined clr/set/set
mov r%5,#%1
mov a,@r%5
anl a,#%2
// Peepholes 248.x have to be compatible with the keyword volatile.
// They optimize typical accesses to memory mapped I/O devices:
-// volatile xdata char t; t|=0x01;
+// volatile xdata char t; t|=0x01;
replace {
mov dptr,%1
movx a,@dptr
movx @dptr,a
}
-// volatile xdata char t; t&=0x01;
+// volatile xdata char t; t&=0x01;
replace {
mov dptr,%1
movx a,@dptr
movx @dptr,a
}
-// volatile xdata char t; t^=0x01;
+// volatile xdata char t; t^=0x01;
replace {
mov dptr,%1
movx a,@dptr
movx @dptr,a
}
+// Peepholes 248.i to 248.m are like 248.d to 248.h except they apply to bitfields:
+// xdata struct { unsigned b0:1; unsigned b1:1; unsigned b2:1; } xport;
+// xport.b0=1; xport.b0=0; xport.b0=1;
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%5
+ movx @dptr,a
+} by {
+ ; Peephole 248.i optimized or/and/or to xdata bitfield
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+ movx a,@dptr
+ orl a,%5
+ movx @dptr,a
+}
+
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%5
+ movx @dptr,a
+} by {
+ ; Peephole 248.j optimized and/or/and to xdata bitfield
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+ movx a,@dptr
+ anl a,%5
+ movx @dptr,a
+}
+
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+} by {
+ ; Peephole 248.k optimized or/and to xdata bitfield
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ anl a,%4
+ movx @dptr,a
+}
+
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+} by {
+ ; Peephole 248.l optimized and/or to xdata bitfield
+ mov dptr,%1
+ movx a,@dptr
+ anl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ orl a,%4
+ movx @dptr,a
+}
+
+replace {
+ mov dptr,%1
+ movx a,@dptr
+ xrl a,%3
+ movx @dptr,a
+
+ mov dptr,%1
+ movx a,@dptr
+ xrl a,%4
+ movx @dptr,a
+} by {
+ ; Peephole 248.m optimized xor/xor to xdata bitfield
+ mov dptr,%1
+ movx a,@dptr
+ xrl a,%3
+ movx @dptr,a
+ movx a,@dptr
+ xrl a,%4
+ movx @dptr,a
+}
+
+
replace {
jnz %1
%1:
} by {
- ; Peephole 249a jump optimization
+ ; Peephole 249.a jump optimization
} if labelRefCount %1 1
replace {
jz %1
%1:
} by {
- ; Peephole 249b jump optimization
+ ; Peephole 249.b jump optimization
} if labelRefCount %1 1
ret
}
+
// applies to f.e. funptrs.c
// saves one byte if %1 is a register or @register
replace {
mov dptr,%5
jmp @a+dptr
}
+
+// applies to f.e. jump tables and scott-bool1.c.
+// similar peepholes can be constructed for other instructions
+// after which a flag or a register is known (like: djnz, cjne, jnc)
+replace {
+ jc %1
+%2:
+ clr c
+} by {
+ ; Peephole 256.a removed redundant clr c
+ jc %1
+%2:
+} if labelRefCount %2 0
+
+// applies to f.e. logf.c
+replace {
+ jnz %1
+%2:
+ clr a
+} by {
+ ; Peephole 256.b removed redundant clr a
+ jnz %1
+%2:
+} if labelRefCount %2 0
+
+// applies to f.e. bug-905492.c
+replace {
+ jnz %1
+%2:
+ mov %3,#0x00
+} by {
+ ; Peephole 256.c loading %3 with zero from a
+ jnz %1
+%2:
+ mov %3,a
+} if labelRefCount %2 0
+
+// applies to f.e. malloc.c
+replace {
+ jnz %1
+%2:
+ mov %4,%5
+ mov %3,#0x00
+} by {
+ ; Peephole 256.d loading %3 with zero from a
+ jnz %1
+%2:
+ mov %4,%5
+ mov %3,a
+} if labelRefCount(%2 0),operandsNotRelated('a' %4)
+
+replace {
+ jnz %1
+%2:
+ mov %4,%5
+ mov %6,%7
+ mov %3,#0x00
+} by {
+ ; Peephole 256.e loading %3 with zero from a
+ jnz %1
+%2:
+ mov %4,%5
+ mov %6,%7
+ mov %3,a
+} if labelRefCount(%2 0),operandsNotRelated('a' %4 %6)
+
+replace {
+ jnz %1
+%2:
+ mov %4,%5
+ mov %6,%7
+ mov %8,%9
+ mov %3,#0x00
+} by {
+ ; Peephole 256.f loading %3 with zero from a
+ jnz %1
+%2:
+ mov %4,%5
+ mov %6,%7
+ mov %8,%9
+ mov %3,a
+} if labelRefCount(%2 0),operandsNotRelated('a' %4 %6 %8)
+
+
+// unsigned char i=8; do{ } while(--i != 0);
+// this currently only applies if i is kept in a register
+replace {
+ dec %1
+ cjne %1,#0x00,%2
+} by {
+ ; Peephole 257 optimized decrement with compare
+ djnz %1,%2
+} if notVolatile %1
+
+
+// in_byte<<=1; if(in_bit) in_byte|=1;
+// helps f.e. reading data on a 3-wire (SPI) bus
+replace {
+ mov a,%1
+ add a,%1
+ mov %1,a
+ jnb %2,%3
+%4:
+ orl %1,#0x01
+%3:
+} by {
+ ; Peephole 258.a optimized bitbanging
+ mov a,%1
+ mov c,%2
+ addc a,%1
+ mov %1,a
+%4:
+%3:
+} if notVolatile %1
+
+// in_byte<<=1; if(in_bit) in_byte|=1;
+replace {
+ mov a,r%1
+ add a,r%1
+ mov r%1,a
+ jnb %2,%3
+%4:
+ orl ar%1,#0x01
+%3:
+} by {
+ ; Peephole 258.b optimized bitbanging
+ mov a,r%1
+ mov c,%2
+ addc a,r%1
+ mov r%1,a
+%4:
+%3:
+}
+
+// in_byte>>=1; if(in_bit) in_byte|=0x80;
+replace {
+ mov a,%1
+ clr c
+ rrc a
+ mov %1,a
+ jnb %2,%3
+%4:
+ orl %1,#0x80
+%3:
+} by {
+ ; Peephole 258.c optimized bitbanging
+ mov a,%1
+ mov c,%2
+ rrc a
+ mov %1,a
+%4:
+%3:
+} if notVolatile %1
+
+// in_byte>>=1; if(in_bit) in_byte|=0x80;
+replace {
+ mov a,r%1
+ clr c
+ rrc a
+ mov r%1,a
+ jnb %2,%3
+%4:
+ orl ar%1,#0x80
+%3:
+} by {
+ ; Peephole 258.d optimized bitbanging
+ mov a,r%1
+ mov c,%2
+ rrc a
+ mov r%1,a
+%4:
+%3:
+}
+
+// out_bit=out_byte&0x80; out_byte<<=1;
+// helps f.e. writing data on a 3-wire (SPI) bus
+replace {
+ mov a,%1
+ rlc a
+ mov %2,c
+ mov a,%1
+ add a,%1
+ mov %1,a
+} by {
+ ; Peephole 258.e optimized bitbanging
+ mov a,%1
+ add a,%1
+ mov %2,c
+ mov %1,a
+} if notVolatile %1
+
+// out_bit=out_byte&0x01; out_byte>>=1;
+replace {
+ mov a,%1
+ rrc a
+ mov %2,c
+ mov a,%1
+ clr c
+ rrc a
+ mov %1,a
+} by {
+ ; Peephole 258.f optimized bitbanging
+ mov a,%1
+ clr c
+ rrc a
+ mov %2,c
+ mov %1,a
+} if notVolatile %1
+
+// Peepholes 259.x are not compatible with peepholes 250.x
+// Peepholes 250.x add jumps to a previously unused label. As the
+// labelRefCount is not increased, peepholes 259.x are (mistakenly) applied.
+// (Mail on sdcc-devel 2004-10-25)
+// Note: Peepholes 193..199, 251 remove jumps to previously used labels without
+// decreasing labelRefCount (less dangerous - this f.e. leads to 253.c being
+// applied instead of 253.b)
+//
+// applies to f.e. vprintf.c
+//replace {
+// sjmp %1
+//%2:
+// ret
+//} by {
+// sjmp %1
+// ; Peephole 259.a removed redundant label %2 and ret
+// ;
+//} if labelRefCount %2 0
+
+// applies to f.e. gets.c
+//replace {
+// ljmp %1
+//%2:
+// ret
+//} by {
+// ljmp %1
+// ; Peephole 259.b removed redundant label %2 and ret
+// ;
+//} if labelRefCount %2 0
+
+// optimizing jumptables
+// Please note: to enable peephole 260.x you currently have to set
+// the environment variable SDCC_SJMP_JUMPTABLE
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+%3:
+} by {
+ ; Peephole 260.a used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+%3:
+} by {
+ ; Peephole 260.b used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+%3:
+} by {
+ ; Peephole 260.c used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+%3:
+} by {
+ ; Peephole 260.d used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+%3:
+} by {
+ ; Peephole 260.e used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+%3:
+} by {
+ ; Peephole 260.f used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+%3:
+} by {
+ ; Peephole 260.g used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+ ljmp %15
+%3:
+} by {
+ ; Peephole 260.h used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+ sjmp %15
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+ ljmp %15
+ ljmp %16
+%3:
+} by {
+ ; Peephole 260.i used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+ sjmp %15
+ sjmp %16
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+ ljmp %15
+ ljmp %16
+ ljmp %17
+%3:
+} by {
+ ; Peephole 260.j used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+ sjmp %15
+ sjmp %16
+ sjmp %17
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+ ljmp %15
+ ljmp %16
+ ljmp %17
+ ljmp %18
+%3:
+} by {
+ ; Peephole 260.k used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+ sjmp %15
+ sjmp %16
+ sjmp %17
+ sjmp %18
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+ ljmp %15
+ ljmp %16
+ ljmp %17
+ ljmp %18
+ ljmp %19
+%3:
+} by {
+ ; Peephole 260.l used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+ sjmp %15
+ sjmp %16
+ sjmp %17
+ sjmp %18
+ sjmp %19
+%3:
+} if labelJTInRange
+
+// optimizing jumptables
+replace {
+ add a,%1
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ ljmp %5
+ ljmp %6
+ ljmp %7
+ ljmp %8
+ ljmp %9
+ ljmp %10
+ ljmp %11
+ ljmp %12
+
+ ljmp %13
+ ljmp %14
+ ljmp %15
+ ljmp %16
+ ljmp %17
+ ljmp %18
+ ljmp %19
+ ljmp %20
+%3:
+} by {
+ ; Peephole 260.m used sjmp in jumptable
+ mov dptr,#%2
+ jmp @a+dptr
+%2:
+ sjmp %5
+ sjmp %6
+ sjmp %7
+ sjmp %8
+ sjmp %9
+ sjmp %10
+ sjmp %11
+ sjmp %12
+
+ sjmp %13
+ sjmp %14
+ sjmp %15
+ sjmp %16
+ sjmp %17
+ sjmp %18
+ sjmp %19
+ sjmp %20
+%3:
+} if labelJTInRange
+
+// applies to: a = (a << 1) | (a >> 15);
+replace {
+ mov a,%1
+ rlc a
+ mov %1,a
+ mov a,%2
+ rlc a
+ mov %2,a
+ mov a,%1
+ mov acc.0,c
+ mov %1,a
+} by {
+ ; Peephole 261.a optimized left rol
+ mov a,%1
+ rlc a
+ xch a,%2
+ rlc a
+ xch a,%2
+ mov acc.0,c
+ mov %1,a
+}
+
+// applies to: a = (a << 15) | (a >> 1);
+replace {
+ mov a,%1
+ rrc a
+ mov %1,a
+ mov a,%2
+ rrc a
+ mov %2,a
+ mov a,%1
+ mov acc.7,c
+ mov %1,a
+} by {
+ ; Peephole 261.b optimized right rol
+ mov a,%1
+ rrc a
+ xch a,%2
+ rrc a
+ xch a,%2
+ mov acc.7,c
+ mov %1,a
+}