//
// added by Jean Louis VERN for
// his shift stuff
-replace restart {
+replace {
xch a,%1
xch a,%1
} by {
; Peephole 2.a removed redundant xch xch
}
-replace restart {
+replace {
// saving 2 byte
mov %1,#0x00
mov a,#0x00
mov %1,a
}
-replace restart {
+replace {
// saving 1 byte
mov %1,#0x00
clr a
mov %1,a
}
-replace restart {
+replace {
// saving 1 byte, losing 1 cycle but maybe allowing peephole 3.b to start
mov %1,#0x00
mov %2,#0x00
mov %1,a
mov dptr,#%2
movx @dptr,a
-}
+} if notVolatile %1
+
replace {
mov a,acc
} by {
movx @dptr,a
inc dptr
movx @dptr,a
-}
+} if notVolatile %1
replace {
mov %1,%2
%7:
mov sp,bp
pop bp
-}
+} if notVolatile %1
replace {
mov %1,%2
} by {
; Peephole 105 removed redundant mov
mov %1,a
-}
+} if notVolatile %1
replace {
mov %1,a
; Peephole 106 removed redundant mov
mov %1,a
clr c
-}
+} if notVolatile %1
replace {
ljmp %1
%2:
} if labelInRange
+
+// An ljmp whose target label is followed only by a ret can itself be
+// replaced by a ret (guarded by the labelIsReturnOnly predicate).
+replace {
+	ljmp %5
+} by {
+	; Peephole 244 replaced ljmp to ret with ret
+	ret
+} if labelIsReturnOnly
+
+
replace {
ljmp %5
%1:
mov r%1,%2
inc @r%1
mov ar%3,@r%1
-}
+} if notVolatile
replace {
mov r%1,%2
mov r%1,%2
dec @r%1
mov ar%3,@r%1
-}
+} if notVolatile
replace {
mov r%1,a
mov %1,a
mov dpl,%2
mov dph,%3
-}
+} if notVolatile %1
// WTF? Doesn't look sensible to me...
//replace {
; Peephole 166 removed redundant mov
mov %1,%2
mov %3,%1
-}
+} if notVolatile %1 %2
replace {
mov c,%1
; Peephole 176 optimized increment, removed redundant mov
inc @r%2
mov %1,@r%2
-}
+} if notVolatile
// this one will screw assignments to volatile/sfr's
-//replace {
-// mov %1,%2
-// mov %2,%1
-//} by {
-// ; Peephole 177 removed redundant mov
-// mov %1,%2
-//}
+replace {
+ mov %1,%2
+ mov %2,%1
+} by {
+ ; Peephole 177 removed redundant mov
+ mov %1,%2
+} if notVolatile %1 %2
// applies to f.e. scott-add.asm (--model-large)
replace {
mov b,a
}
+// applies to:
+// volatile xdata char t; t=0x01; t=0x03;
+// Only the redundant reload of dptr is dropped; every movx access is
+// kept, so these two rules are safe even for volatile xdata objects.
+replace {
+	mov dptr,%1
+	mov a,%2
+	movx @dptr,a
+	mov dptr,%1
+} by {
+	; Peephole 180.a removed redundant mov to dptr
+	mov dptr,%1
+	mov a,%2
+	movx @dptr,a
+}
+
+// volatile xdata char t; t=0x01; t=0x03; t=0x01;
+replace {
+	mov dptr,%1
+	mov a,%2
+	movx @dptr,a
+	mov a,%3
+	movx @dptr,a
+	mov dptr,%1
+} by {
+	; Peephole 180.b removed redundant mov to dptr
+	mov dptr,%1
+	mov a,%2
+	movx @dptr,a
+	mov a,%3
+	movx @dptr,a
+}
+
// saving 1 byte, 0 cycles
replace {
mov a,#0x00
} by {
- ; Peephole 180 changed mov to clr
+ ; Peephole 181 changed mov to clr
clr a
}
; Peephole 184 removed redundant mov
cpl a
mov %1,a
-}
+} if notVolatile %1
replace {
// acc being incremented might cause problems
; Peephole 185 changed order of increment (acc incremented also!)
inc a
mov %1,a
-}
+} if notVolatile %1
replace {
add a,#%1
mov dptr,%2
movc a,@a+dptr
mov %1,a
-}
+} if notVolatile %1
replace {
anl a,#0x0f
; Peephole 189 removed redundant mov and anl
anl a,#0x0f
mov %1,a
-}
+} if notVolatile %1
// rules 190 & 191 need to be in order
replace {
; Peephole 190 removed redundant mov
mov a,%1
lcall __gptrput
-}
+} if notVolatile %1
replace {
mov %1,a
mov dpl,%2
mov dph,%3
mov b,%4
-}
+} if notVolatile %1
replace {
mov r%1,a
; Peephole 204 removed redundant mov
add a,acc
mov %1,a
-}
+} if notVolatile %1
replace {
djnz %1,%2
mov %1,%1
} by {
; Peephole 206 removed redundant mov %1,%1
-}
+} if notVolatile
replace {
mov a,_bp
xrl %1,#0x80
}
+
replace {
mov %1,a
mov a,%2
mov %1 + %2,(%2 + %1)
} by {
; Peephole 221.a remove redundant move
-}
+} if notVolatile
replace {
mov (%1 + %2 + %3),((%2 + %1) + %3)
} by {
; Peephole 221.b remove redundant move
-}
+} if notVolatile
replace {
dec r%1
; Peephole 223 removed redundant dph/dpl moves
mov %1,dpl
mov %2,dph
-}
+} if notVolatile %1 %2
replace {
mov %1,dpl
; Peephole 224 removed redundant dph/dpl moves
mov %1,dpl
mov (%1 + 1),dph
-}
+} if notVolatile %1
replace {
mov a,%1
mov dpl,%2
mov dph,%3
mov b,%4
-}
+} if notVolatile %1
replace {
clr a
mov a,#%2
movx @dptr,a
} by {
- ; Peephole 230 replaced inefficient 16 constant
+ ; Peephole 230 replaced inefficient 16 bit constant
mov dptr,#%1
mov a,#%2
movx @dptr,a
%3:
sjmp %5
} if labelInRange
+
+// 244.a-d: both r%1 and the other register hold the same values as in
+// the original sequence; only the routing through the accumulator
+// changes (register-to-register moves via a are cheaper than via rN).
+// applies to f.e. simplefloat.c (saving 1 cycle)
+replace {
+	mov r%1,dpl
+	mov a,r%1
+} by {
+	; Peephole 244.a moving first to a instead of r%1
+	mov a,dpl
+	mov r%1,a
+}
+
+// applies to f.e. _itoa.c (saving 1 cycle)
+replace {
+	mov r%1,dph
+	mov a,r%1
+} by {
+	; Peephole 244.b moving first to a instead of r%1
+	mov a,dph
+	mov r%1,a
+}
+
+
+// applies to f.e. bug-460010.c (saving 1 cycle)
+replace {
+	mov r%1,a
+	mov dpl,r%1
+} by {
+	; Peephole 244.c loading dpl from a instead of r%1
+	mov r%1,a
+	mov dpl,a
+}
+
+replace {
+	mov r%1,a
+	mov dph,r%1
+} by {
+	; Peephole 244.d loading dph from a instead of r%1
+	mov r%1,a
+	mov dph,a
+}
+
+// this one is safe but disables 245.a 245.b
+// please remove 245 if 245.a 245.b are found to be safe
+// applies to f.e. scott-compare.c
+// clr/rlc materializes the carry into a; "cjne a,#0x01" then sets
+// carry iff a was 0, so the second clr/rlc materializes the complement
+// of the original carry.  "cpl c" before a single materialization
+// leaves a, c and r%1 with identical final values.
+replace {
+	clr a
+	rlc a
+	mov r%1,a
+	cjne a,#0x01,%2
+%2:
+	clr a
+	rlc a
+	mov r%1,a
+} by {
+	; Peephole 245 optimized complement (r%1 and acc set needed?)
+	cpl c
+	clr a
+	rlc a
+	mov r%1,a
+} if labelRefCount %2 1
+
+// this one will not be triggered if 245 is present
+// please remove 245 if 245.a 245.b are found to be safe
+// applies to f.e. vprintf.c
+// Final a is zero exactly when the original carry was set, so jz on
+// the complement is equivalent to jc on the original carry.
+replace {
+	clr a
+	rlc a
+	mov r%1,a
+	cjne a,#0x01,%2
+%2:
+	clr a
+	rlc a
+	mov r%1,a
+	jz %3
+} by {
+	; Peephole 245.a optimized conditional jump (r%1 and acc not set!)
+	jc %3
+} if labelRefCount %2 1
+
+// this one will not be triggered if 245 is present
+// please remove 245 if 245.a 245.b are found to be safe
+// applies to f.e. scott-compare.c
+replace {
+	clr a
+	rlc a
+	mov r%1,a
+	cjne a,#0x01,%2
+%2:
+	clr a
+	rlc a
+	mov r%1,a
+	jnz %3
+} by {
+	; Peephole 245.b optimized conditional jump (r%1 and acc not set!)
+	jnc %3
+} if labelRefCount %2 1
+
+
+// rules 246.x apply to f.e. bitfields.c
+// Two back-to-back read-modify-write sequences on the same xdata
+// address are folded into one; constant masks are combined at assembly
+// time (& for consecutive anl, | for consecutive orl).  notVolatile is
+// required because one movx read/write pair and a dptr reload are
+// removed.
+replace {
+	mov dptr,#%1
+	movx a,@dptr
+	anl a,#%2
+	movx @dptr,a
+	mov dptr,#%1
+	movx a,@dptr
+	anl a,#%3
+	movx @dptr,a
+} by {
+	; Peephole 246.a combined clr/clr
+	mov dptr,#%1
+	movx a,@dptr
+	anl a,#%2&%3
+	movx @dptr,a
+} if notVolatile %1
+
+replace {
+	mov dptr,#%1
+	movx a,@dptr
+	orl a,#%2
+	movx @dptr,a
+	mov dptr,#%1
+	movx a,@dptr
+	orl a,#%3
+	movx @dptr,a
+} by {
+	; Peephole 246.b combined set/set
+	mov dptr,#%1
+	movx a,@dptr
+	orl a,#%2|%3
+	movx @dptr,a
+} if notVolatile %1
+
+replace {
+	mov dptr,#%1
+	movx a,@dptr
+	orl a,#%2
+	movx @dptr,a
+	mov dptr,#%1
+	movx a,@dptr
+	anl a,#%3
+	movx @dptr,a
+} by {
+	; Peephole 246.c combined set/clr
+	mov dptr,#%1
+	movx a,@dptr
+	orl a,#%2
+	anl a,#%3
+	movx @dptr,a
+} if notVolatile %1
+
+replace {
+	mov dptr,#%1
+	movx a,@dptr
+	anl a,#%2
+	movx @dptr,a
+	mov dptr,#%1
+	movx a,@dptr
+	orl a,#%3
+	movx @dptr,a
+} by {
+	; Peephole 246.d combined clr/set
+	mov dptr,#%1
+	movx a,@dptr
+	anl a,#%2
+	orl a,#%3
+	movx @dptr,a
+} if notVolatile %1
+
+replace {
+	mov dptr,#%1
+	movx a,@dptr
+	orl a,#%2
+	anl a,#%3
+	movx @dptr,a
+	mov dptr,#%1
+	movx a,@dptr
+	anl a,#%4
+	movx @dptr,a
+} by {
+	; Peephole 246.e combined set/clr/clr
+	mov dptr,#%1
+	movx a,@dptr
+	orl a,#%2
+	anl a,#%3&%4
+	movx @dptr,a
+} if notVolatile %1
+
+replace {
+	mov dptr,#%1
+	movx a,@dptr
+	orl a,#%2
+	anl a,#%3
+	movx @dptr,a
+	mov dptr,#%1
+	movx a,@dptr
+	orl a,#%4
+	movx @dptr,a
+} by {
+	; Peephole 246.f combined set/clr/set
+	mov dptr,#%1
+	movx a,@dptr
+	orl a,#%2
+	anl a,#%3
+	orl a,#%4
+	movx @dptr,a
+} if notVolatile %1
+
+replace {
+	mov dptr,#%1
+	movx a,@dptr
+	anl a,#%2
+	orl a,#%3
+	movx @dptr,a
+	mov dptr,#%1
+	movx a,@dptr
+	anl a,#%4
+	movx @dptr,a
+} by {
+	; Peephole 246.g combined clr/set/clr
+	mov dptr,#%1
+	movx a,@dptr
+	anl a,#%2
+	orl a,#%3
+	anl a,#%4
+	movx @dptr,a
+} if notVolatile %1
+
+replace {
+	mov dptr,#%1
+	movx a,@dptr
+	anl a,#%2
+	orl a,#%3
+	movx @dptr,a
+	mov dptr,#%1
+	movx a,@dptr
+	orl a,#%4
+	movx @dptr,a
+} by {
+	; Peephole 246.h combined clr/set/set
+	mov dptr,#%1
+	movx a,@dptr
+	anl a,#%2
+	orl a,#%3|%4
+	movx @dptr,a
+} if notVolatile %1
+
+
+
+
+// rules 247.x apply to f.e. bitfields.c
+// Same mask folding as rules 246.x, but for bytes accessed indirectly
+// through pointer register r%5 instead of through dptr.
+replace {
+	mov r%5,#%1
+	mov a,@r%5
+	anl a,#%2
+	mov @r%5,a
+	mov r%5,#%1
+	mov a,@r%5
+	anl a,#%3
+	mov @r%5,a
+} by {
+	; Peephole 247.a combined clr/clr
+	mov r%5,#%1
+	mov a,@r%5
+	anl a,#%2&%3
+	mov @r%5,a
+} if notVolatile %1
+
+replace {
+	mov r%5,#%1
+	mov a,@r%5
+	orl a,#%2
+	mov @r%5,a
+	mov r%5,#%1
+	mov a,@r%5
+	orl a,#%3
+	mov @r%5,a
+} by {
+	; Peephole 247.b combined set/set
+	mov r%5,#%1
+	mov a,@r%5
+	orl a,#%2|%3
+	mov @r%5,a
+} if notVolatile %1
+
+replace {
+	mov r%5,#%1
+	mov a,@r%5
+	orl a,#%2
+	mov @r%5,a
+	mov r%5,#%1
+	mov a,@r%5
+	anl a,#%3
+	mov @r%5,a
+} by {
+	; Peephole 247.c combined set/clr
+	mov r%5,#%1
+	mov a,@r%5
+	orl a,#%2
+	anl a,#%3
+	mov @r%5,a
+} if notVolatile %1
+
+replace {
+	mov r%5,#%1
+	mov a,@r%5
+	anl a,#%2
+	mov @r%5,a
+	mov r%5,#%1
+	mov a,@r%5
+	orl a,#%3
+	mov @r%5,a
+} by {
+	; Peephole 247.d combined clr/set
+	mov r%5,#%1
+	mov a,@r%5
+	anl a,#%2
+	orl a,#%3
+	mov @r%5,a
+} if notVolatile %1
+
+replace {
+	mov r%5,#%1
+	mov a,@r%5
+	orl a,#%2
+	anl a,#%3
+	mov @r%5,a
+	mov r%5,#%1
+	mov a,@r%5
+	anl a,#%4
+	mov @r%5,a
+} by {
+	; Peephole 247.e combined set/clr/clr
+	mov r%5,#%1
+	mov a,@r%5
+	orl a,#%2
+	anl a,#%3&%4
+	mov @r%5,a
+} if notVolatile %1
+
+replace {
+	mov r%5,#%1
+	mov a,@r%5
+	orl a,#%2
+	anl a,#%3
+	mov @r%5,a
+	mov r%5,#%1
+	mov a,@r%5
+	orl a,#%4
+	mov @r%5,a
+} by {
+	; Peephole 247.f combined set/clr/set
+	mov r%5,#%1
+	mov a,@r%5
+	orl a,#%2
+	anl a,#%3
+	orl a,#%4
+	mov @r%5,a
+} if notVolatile %1
+
+replace {
+	mov r%5,#%1
+	mov a,@r%5
+	anl a,#%2
+	orl a,#%3
+	mov @r%5,a
+	mov r%5,#%1
+	mov a,@r%5
+	anl a,#%4
+	mov @r%5,a
+} by {
+	; Peephole 247.g combined clr/set/clr
+	mov r%5,#%1
+	mov a,@r%5
+	anl a,#%2
+	orl a,#%3
+	anl a,#%4
+	mov @r%5,a
+} if notVolatile %1
+
+// 247.h combined clr/set/set — fixed: the second indirect read must use
+// the same pointer register r%5 that was just reloaded (the previous
+// "@r%4" operand referenced the orl constant's operand number, so the
+// pattern could not match the intended code; all sibling rules 247.a-g
+// use @r%5 here).
+replace {
+	mov r%5,#%1
+	mov a,@r%5
+	anl a,#%2
+	orl a,#%3
+	mov @r%5,a
+	mov r%5,#%1
+	mov a,@r%5
+	orl a,#%4
+	mov @r%5,a
+} by {
+	; Peephole 247.h combined clr/set/set
+	mov r%5,#%1
+	mov a,@r%5
+	anl a,#%2
+	orl a,#%3|%4
+	mov @r%5,a
+} if notVolatile %1
+
+
+// Peepholes 248.x have to be compatible with the keyword volatile.
+// They optimize typical accesses to memory mapped I/O devices:
+// Every movx read and movx write of the original sequence is kept
+// (only redundant dptr reloads and dead intermediate copies to r%2 are
+// dropped), so the number and order of xdata accesses is unchanged.
+// volatile xdata char t; t|=0x01;
+replace {
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	mov dptr,%1
+	mov a,%3
+	orl a,r%2
+	movx @dptr,a
+} by {
+	; Peephole 248.a optimized or to xdata
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	orl a,%3
+	movx @dptr,a
+}
+
+// volatile xdata char t; t&=0x01;
+replace {
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	mov dptr,%1
+	mov a,%3
+	anl a,r%2
+	movx @dptr,a
+} by {
+	; Peephole 248.b optimized and to xdata
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	anl a,%3
+	movx @dptr,a
+}
+
+// volatile xdata char t; t^=0x01;
+replace {
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	mov dptr,%1
+	mov a,%3
+	xrl a,r%2
+	movx @dptr,a
+} by {
+	; Peephole 248.c optimized xor to xdata
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	xrl a,%3
+	movx @dptr,a
+}
+
+// volatile xdata char t; t|=0x01; t&=~0x01; t|=0x01;
+replace {
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	orl a,%3
+	movx @dptr,a
+
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	anl a,%4
+	movx @dptr,a
+
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	orl a,%5
+	movx @dptr,a
+} by {
+	; Peephole 248.d optimized or/and/or to volatile xdata
+	mov dptr,%1
+	movx a,@dptr
+	orl a,%3
+	movx @dptr,a
+	movx a,@dptr
+	anl a,%4
+	movx @dptr,a
+	movx a,@dptr
+	mov r%2,a
+	orl a,%5
+	movx @dptr,a
+}
+
+// volatile xdata char t; t&=~0x01; t|=0x01; t&=~0x01;
+replace {
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	anl a,%3
+	movx @dptr,a
+
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	orl a,%4
+	movx @dptr,a
+
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	anl a,%5
+	movx @dptr,a
+} by {
+	; Peephole 248.e optimized and/or/and to volatile xdata
+	mov dptr,%1
+	movx a,@dptr
+	anl a,%3
+	movx @dptr,a
+	movx a,@dptr
+	orl a,%4
+	movx @dptr,a
+	movx a,@dptr
+	mov r%2,a
+	anl a,%5
+	movx @dptr,a
+}
+
+// volatile xdata char t; t|=0x01; t&=~0x01;
+replace {
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	orl a,%3
+	movx @dptr,a
+
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	anl a,%4
+	movx @dptr,a
+} by {
+	; Peephole 248.f optimized or/and to volatile xdata
+	mov dptr,%1
+	movx a,@dptr
+	orl a,%3
+	movx @dptr,a
+	movx a,@dptr
+	mov r%2,a
+	anl a,%4
+	movx @dptr,a
+}
+
+// volatile xdata char t; t&=~0x01; t|=0x01;
+replace {
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	anl a,%3
+	movx @dptr,a
+
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	orl a,%4
+	movx @dptr,a
+} by {
+	; Peephole 248.g optimized and/or to volatile xdata
+	mov dptr,%1
+	movx a,@dptr
+	anl a,%3
+	movx @dptr,a
+	movx a,@dptr
+	mov r%2,a
+	orl a,%4
+	movx @dptr,a
+}
+
+// volatile xdata char t; t^=0x01; t^=0x01;
+replace {
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	xrl a,%3
+	movx @dptr,a
+
+	mov dptr,%1
+	movx a,@dptr
+	mov r%2,a
+	xrl a,%4
+	movx @dptr,a
+} by {
+	; Peephole 248.h optimized xor/xor to volatile xdata
+	mov dptr,%1
+	movx a,@dptr
+	xrl a,%3
+	movx @dptr,a
+	movx a,@dptr
+	mov r%2,a
+	xrl a,%4
+	movx @dptr,a
+}
+