// push %1
//} by {
// ; Peephole 2 removed pop %1 push %1 (not push pop)
-// mov %2,%3
+// mov %2,%3
//}
//
mov %1,a
mov dptr,#%2
movx @dptr,a
-}
+} if notVolatile %1
+
replace {
mov a,acc
} by {
movx @dptr,a
inc dptr
movx @dptr,a
-}
+} if notVolatile %1
replace {
mov %1,%2
%7:
mov sp,bp
pop bp
-}
+} if notVolatile %1
replace {
mov %1,%2
} by {
; Peephole 105 removed redundant mov
mov %1,a
-}
+} if notVolatile %1
replace {
mov %1,a
; Peephole 106 removed redundant mov
mov %1,a
clr c
-}
+} if notVolatile %1
replace {
ljmp %1
ljmp %5
%2:
} by {
- ; Peephole 112 removed ljmp by inverse jump logic
+ ; Peephole 112.a removed ljmp by inverse jump logic
jb %1,%5
%2:
} if labelInRange
-
-replace {
- ljmp %5
-} by {
- ; Peephole 244 replaced ljmp to ret with ret
- ret
-} if labelIsReturnOnly
-
-
replace {
ljmp %5
%1:
} by {
- ; Peephole 132 changed ljmp to sjmp
+ ; Peephole 112.b changed ljmp to sjmp
sjmp %5
%1:
} if labelInRange
mov r%1,%2
inc @r%1
mov ar%3,@r%1
-}
+} if notVolatile
replace {
mov r%1,%2
mov r%1,%2
dec @r%1
mov ar%3,@r%1
-}
+} if notVolatile
replace {
mov r%1,a
mov %1,a
mov dpl,%2
mov dph,%3
-}
+} if notVolatile %1
// WTF? Doesn't look sensible to me...
//replace {
clr c
addc a,%1
} by {
- ; Peephole 145 changed to add without carry
+ ; Peephole 145 changed to add without carry
add a,%1
}
; Peephole 166 removed redundant mov
mov %1,%2
mov %3,%1
-}
+} if notVolatile %1 %2
replace {
mov c,%1
; Peephole 176 optimized increment, removed redundant mov
inc @r%2
mov %1,@r%2
-}
+} if notVolatile
// this one will screw up assignments to volatile/sfr's
-//replace {
-// mov %1,%2
-// mov %2,%1
-//} by {
-// ; Peephole 177 removed redundant mov
-// mov %1,%2
-//}
+replace {
+ mov %1,%2
+ mov %2,%1
+} by {
+ ; Peephole 177.a removed redundant mov
+ mov %1,%2
+} if notVolatile %1 %2
// applies to f.e. scott-add.asm (--model-large)
replace {
mov r%1,a
mov a,ar%1
} by {
- ; Peephole 177 removed redundant mov
+ ; Peephole 177.b removed redundant mov
mov r%1,a
}
+// applies to f.e. bug-408972.c
+replace {
+ mov %1,%2
+ mov %1,%3
+} by {
+	; Peephole 177.c removed redundant mov
+ mov %1,%3
+} if notVolatile %1 %2
+
+// applies to f.e. bug-408972.c
+// not before peephole 177.c
+replace {
+ mov %1,%2
+ mov %3,%4
+ mov %2,%1
+} by {
+	; Peephole 177.d removed redundant mov
+ mov %1,%2
+ mov %3,%4
+} if notVolatile %1 %2
+
replace {
mov a,%1
mov b,a
mov b,a
}
+// applies to:
+// volatile xdata char t; t=0x01; t=0x03;
+replace {
+ mov dptr,%1
+ mov a,%2
+ movx @dptr,a
+ mov dptr,%1
+} by {
+ ; Peephole 180.a removed redundant mov to dptr
+ mov dptr,%1
+ mov a,%2
+ movx @dptr,a
+}
+
+// volatile xdata char t; t=0x01; t=0x03; t=0x01;
+replace {
+ mov dptr,%1
+ mov a,%2
+ movx @dptr,a
+ mov a,%3
+ movx @dptr,a
+ mov dptr,%1
+} by {
+ ; Peephole 180.b removed redundant mov to dptr
+ mov dptr,%1
+ mov a,%2
+ movx @dptr,a
+ mov a,%3
+ movx @dptr,a
+}
+
// saving 1 byte, 0 cycles
replace {
mov a,#0x00
} by {
- ; Peephole 180 changed mov to clr
+ ; Peephole 181 changed mov to clr
clr a
}
; Peephole 184 removed redundant mov
cpl a
mov %1,a
-}
+} if notVolatile %1
replace {
// acc being incremented might cause problems
; Peephole 185 changed order of increment (acc incremented also!)
inc a
mov %1,a
-}
+} if notVolatile %1
replace {
add a,#%1
movc a,@a+dptr
mov %3,a
mov acc,b
- inc dptr
+ inc dptr
}
replace {
inc dptr
}
+// char indexed access to: char code table[] = {4,3,2,1};
replace {
add a,#%1
mov dpl,a
movc a,@a+dptr
}
+// char indexed access to: int code table[] = {4,3,2,1};
+replace {
+ mov b,#0x02
+ mul ab
+ add a,#%2
+ mov dpl,a
+ mov a,b
+ addc a,#(%2 >> 8)
+ mov dph,a
+ clr a
+ movc a,@a+dptr
+ mov %3,a
+ mov a,#0x01
+ movc a,@a+dptr
+} by {
+ ; Peephole 186.e optimized movc sequence (b, dptr differ)
+ add a,acc
+ mov b,a
+ mov dptr,#%2
+ jnc .+3
+ inc dph
+ movc a,@a+dptr
+ mov %3,a
+ mov a,b
+ inc a
+ movc a,@a+dptr
+}
+
replace {
mov r%1,%2
anl ar%1,#%3
mov dptr,%2
movc a,@a+dptr
mov %1,a
-}
+} if notVolatile %1
replace {
anl a,#0x0f
; Peephole 189 removed redundant mov and anl
anl a,#0x0f
mov %1,a
-}
+} if notVolatile %1
// rules 190 & 191 need to be in order
replace {
; Peephole 190 removed redundant mov
mov a,%1
lcall __gptrput
-}
+} if notVolatile %1
replace {
mov %1,a
mov dpl,%2
mov dph,%3
mov b,%4
-}
+} if notVolatile %1
replace {
mov r%1,a
; Peephole 204 removed redundant mov
add a,acc
mov %1,a
-}
+} if notVolatile %1
replace {
djnz %1,%2
mov %1,%1
} by {
; Peephole 206 removed redundant mov %1,%1
-}
+} if notVolatile
replace {
mov a,_bp
xrl %1,#0x80
}
+
replace {
mov %1,a
mov a,%2
mov %1 + %2,(%2 + %1)
} by {
; Peephole 221.a remove redundant move
-}
+} if notVolatile
replace {
mov (%1 + %2 + %3),((%2 + %1) + %3)
} by {
; Peephole 221.b remove redundant move
-}
+} if notVolatile
replace {
dec r%1
; Peephole 223 removed redundant dph/dpl moves
mov %1,dpl
mov %2,dph
-}
+} if notVolatile %1 %2
replace {
mov %1,dpl
; Peephole 224 removed redundant dph/dpl moves
mov %1,dpl
mov (%1 + 1),dph
-}
+} if notVolatile %1
replace {
mov a,%1
mov dpl,%2
mov dph,%3
mov b,%4
-}
+} if notVolatile %1
replace {
clr a
mov a,#%2
movx @dptr,a
} by {
- ; Peephole 230 replaced inefficient 16 constant
+ ; Peephole 230 replaced inefficient 16 bit constant
mov dptr,#%1
mov a,#%2
movx @dptr,a
cjne r%1,#%2,%3
mov a,#0x01
sjmp %4
-%3:
+%3:
clr a
%4:
} by {
sjmp %7
%3:
clr a
-%7:
+%7:
} by {
; Peephole 241.d optimized compare
clr a
%7:
}
-// applies to f.e. j = (k!=0x1000);
+// applies to f.e. j = (k!=0x1000);
// with volatile idata int k;
replace {
cjne @r%1,#%2,%3
cjne @r%1,#%2,%3
mov a,#0x01
sjmp %7
-%3:
+%3:
clr a
%7:
} by {
movx a,@dptr
anl a,#%2&%3
movx @dptr,a
-}
+} if notVolatile %1
replace {
mov dptr,#%1
movx a,@dptr
orl a,#%2|%3
movx @dptr,a
-}
+} if notVolatile %1
replace {
mov dptr,#%1
orl a,#%2
anl a,#%3
movx @dptr,a
-}
+} if notVolatile %1
replace {
mov dptr,#%1
anl a,#%2
orl a,#%3
movx @dptr,a
-}
+} if notVolatile %1
replace {
mov dptr,#%1
orl a,#%2
anl a,#%3&%4
movx @dptr,a
-}
+} if notVolatile %1
replace {
mov dptr,#%1
anl a,#%3
orl a,#%4
movx @dptr,a
-}
+} if notVolatile %1
replace {
mov dptr,#%1
orl a,#%3
anl a,#%4
movx @dptr,a
-}
+} if notVolatile %1
replace {
mov dptr,#%1
anl a,#%2
orl a,#%3|%4
movx @dptr,a
-}
+} if notVolatile %1
mov a,@r%5
anl a,#%2&%3
mov @r%5,a
-}
+} if notVolatile %1
replace {
mov r%5,#%1
mov a,@r%5
orl a,#%2|%3
mov @r%5,a
-}
+} if notVolatile %1
replace {
mov r%5,#%1
orl a,#%2
anl a,#%3
mov @r%5,a
-}
+} if notVolatile %1
replace {
mov r%5,#%1
anl a,#%2
orl a,#%3
mov @r%5,a
-}
+} if notVolatile %1
replace {
mov r%5,#%1
orl a,#%2
anl a,#%3&%4
mov @r%5,a
-}
+} if notVolatile %1
replace {
mov r%5,#%1
anl a,#%3
orl a,#%4
mov @r%5,a
-}
+} if notVolatile %1
replace {
mov r%5,#%1
orl a,#%3
anl a,#%4
mov @r%5,a
-}
+} if notVolatile %1
replace {
mov r%5,#%1
anl a,#%2
orl a,#%3|%4
mov @r%5,a
-}
+} if notVolatile %1
// Peepholes 248.x have to be compatible with the keyword volatile.
xrl a,%4
movx @dptr,a
}
+
+replace {
+ jnz %1
+%1:
+} by {
+	; Peephole 249.a jump optimization
+} if labelRefCount %1 1
+
+replace {
+ jz %1
+%1:
+} by {
+	; Peephole 249.b jump optimization
+} if labelRefCount %1 1
+
+
+// This allows non-interrupt and interrupt code to safely compete
+// for a resource without the non-interrupt code having to disable
+// interrupts:
+// volatile bit resource_is_free;
+// if( resource_is_free ) {
+// resource_is_free=0; do_something; resource_is_free=1;
+// }
+replace {
+ jnb %1,%2
+%3:
+ clr %1
+} by {
+ ; Peephole 250.a using atomic test and clear
+ jbc %1,%3
+ sjmp %2
+%3:
+} if labelRefCount %3 0
+
+replace {
+ jb %1,%2
+ ljmp %3
+%2:
+ clr %1
+} by {
+ ; Peephole 250.b using atomic test and clear
+ jbc %1,%2
+ ljmp %3
+%2:
+} if labelRefCount %2 1
+
+
+// not before peephole 250.b
+replace {
+ ljmp %5
+} by {
+ ; Peephole 251 replaced ljmp to ret with ret
+ ret
+} if labelIsReturnOnly
+
+// applies to shifts.c and when accessing arrays with an unsigned integer index
+// saving 1 byte, 2 cycles
+replace {
+ mov r%1,%2
+ mov a,(%2 + 1)
+ xch a,r%1
+ add a,acc
+ xch a,r%1
+ rlc a
+ mov r%3,a
+} by {
+ ; Peephole 252 optimized left shift
+ mov a,%2
+ add a,acc
+ mov r%1,a
+ mov a,(%2 + 1)
+ rlc a
+ mov r%3,a
+}