5 // ; Peephole 1 removed pop %1 push %1 (not push pop)
13 // ; Peephole 2 removed pop %1 push %1 (not push pop)
18 // added by Jean Louis VERN for
24 ; Peephole 2.a removed redundant xch xch
32 ; Peephole 3.a changed mov to clr
42 ; Peephole 3.b changed mov to clr
48 // saving 1 byte, losing 1 cycle but maybe allowing peephole 3.b to start
53 ; Peephole 3.c changed mov to clr
66 ; Peephole 100 removed redundant mov
75 ; Peephole 100.a removed redundant mov
85 ; Peephole 101 removed redundant mov
103 ; Peephole 102 removed redundant mov
125 ; Peephole 103 removed redundant mov
142 ; Peephole 104 optimized increment (acc not set to r%1, flags undefined)
151 ; Peephole 105 removed redundant mov
160 ; Peephole 106 removed redundant mov
169 ; Peephole 107 removed redundant ljmp
178 ; Peephole 108 removed ljmp by inverse jump logic
188 ; Peephole 109 removed ljmp by inverse jump logic
198 ; Peephole 110 removed ljmp by inverse jump logic
208 ; Peephole 111 removed ljmp by inverse jump logic
218 ; Peephole 112.a removed ljmp by inverse jump logic
227 ; Peephole 112.b changed ljmp to sjmp
241 ; Peephole 113 optimized misc sequence
246 } if labelRefCount %3 1
257 ; Peephole 114 optimized misc sequence
263 } if labelRefCount %3 2
272 ; Peephole 115 jump optimization
276 } if labelRefCount %3 1
286 ; Peephole 116 jump optimization
291 } if labelRefCount %3 2
302 ; Peephole 117 jump optimization
308 } if labelRefCount %3 3
320 ; Peephole 118 jump optimization
327 } if labelRefCount %3 4
336 ; Peephole 119 jump optimization
339 } if labelRefCount %3 1
349 ; Peephole 120 jump optimization
353 } if labelRefCount %3 2
364 ; Peephole 121 jump optimization
369 } if labelRefCount %3 3
381 ; Peephole 122 jump optimization
387 } if labelRefCount %3 4
396 ; Peephole 123 jump optimization
400 } if labelRefCount %3 1
410 ; Peephole 124 jump optimization
415 } if labelRefCount %3 2
426 ; Peephole 125 jump optimization
432 } if labelRefCount %3 3
444 ; Peephole 126 jump optimization
451 } if labelRefCount %3 4
464 ; Peephole 127 removed misc sequence
466 } if labelRefCount %3 0
473 ; Peephole 128 jump optimization
482 ; Peephole 129 jump optimization
489 ; Peephole 130 changed target address mode r%1 to ar%1
499 ; Peephole 131 optimized decrement (not caring for c)
511 ; Peephole 133 removed redundant moves
524 ; Peephole 134 removed redundant moves
535 ; Peephole 135 removed redundant mov
546 ; Peephole 136 removed redundant moves
552 // WTF? Doesn't look sensible to me...
562 // ; Peephole 137 optimized misc jump sequence
566 //} if labelRefCount %4 1
577 // ; Peephole 138 optimized misc jump sequence
582 //} if labelRefCount %4 1
589 ; Peephole 139 removed redundant mov
599 ; Peephole 140 removed redundant mov
608 ; Peephole 141 removed redundant mov
618 ; Peephole 142 removed redundant moves
627 ; Peephole 143 converted rlc to rl
635 ; Peephole 144 converted rrc to rr
643 ; Peephole 145 changed to add without carry
652 ; Peephole 146 changed to add without carry
660 ; Peephole 147 changed target address mode r%1 to ar%1
667 ; Peephole 148 changed target address mode r%1 to ar%1
674 ; Peephole 149 changed target address mode r%1 to ar%1
684 ; Peephole 150 removed misc moves via dpl before return
697 ; Peephole 151 removed misc moves via dph, dpl before return
709 ; Peephole 152 removed misc moves via dph, dpl before return
724 ; Peephole 153 removed misc moves via dph, dpl, b before return
737 ; Peephole 154 removed misc moves via dph, dpl, b before return
751 ; Peephole 155 removed misc moves via dph, dpl, b before return
768 ; Peephole 156 removed misc moves via dph, dpl, b, a before return
783 ; Peephole 157 removed misc moves via dph, dpl, b, a before return
797 ; Peephole 158 removed misc moves via dph, dpl, b, a before return
806 ; Peephole 159 avoided xrl during execution
815 ; Peephole 160 removed sjmp by inverse jump logic
825 ; Peephole 161 removed sjmp by inverse jump logic
835 ; Peephole 162 removed sjmp by inverse jump logic
845 ; Peephole 163 removed sjmp by inverse jump logic
855 ; Peephole 164 removed sjmp by inverse jump logic
865 ; Peephole 165 removed sjmp by inverse jump logic
875 ; Peephole 166 removed redundant mov
878 } if notVolatile %1 %2
885 ; Peephole 167 removed redundant bit moves (c not set to %1)
894 ; Peephole 168 jump optimization
904 ; Peephole 169 jump optimization
916 ; Peephole 170 jump optimization
919 } if labelRefCount %3 1
929 ; Peephole 171 jump optimization
933 } if labelRefCount %3 2
944 ; Peephole 172 jump optimization
949 } if labelRefCount %3 3
961 ; Peephole 173 jump optimization
967 } if labelRefCount %3 4
976 ; Peephole 174 optimized decrement (acc not set to %2, flags undefined)
987 ; Peephole 175 optimized increment (acc not set to %2, flags undefined)
997 ; Peephole 176 optimized increment, removed redundant mov
1002 // this one will screw assignments to volatile/sfr's
1007 ; Peephole 177 removed redundant mov
1009 } if notVolatile %1 %2
1011 // applies to f.e. scott-add.asm (--model-large)
1016 ; Peephole 177 removed redundant mov
1025 ; Peephole 178 removed redundant mov
1030 // rules 179-182 provided by : Frieder <fe@lake.iup.uni-heidelberg.de>
1031 // saving 2 bytes, 1 cycle
1036 ; Peephole 179 changed mov to clr
1042 // volatile xdata char t; t=0x01; t=0x03;
1049 ; Peephole 180.a removed redundant mov to dptr
1055 // volatile xdata char t; t=0x01; t=0x03; t=0x01;
1064 ; Peephole 180.b removed redundant mov to dptr
1072 // saving 1 byte, 0 cycles
1076 ; Peephole 181 changed mov to clr
1080 // saving 3 bytes, 2 cycles
1081 // provided by Bernhard Held <bernhard.held@de.westinghouse.com>
1086 ; Peephole 182.a used 16 bit load of DPTR
1090 // saving 3 bytes, 2 cycles, return(NULL) profits here
1095 ; Peephole 182.b used 16 bit load of dptr
1099 // saving 3 bytes, 2 cycles. Probably obsoleted by 182.b
1104 ; Peephole 182.c used 16 bit load of dptr
1105 mov dptr,#(((%2)<<8) + %1)
1112 ; Peephole 183 avoided anl during execution
1121 ; Peephole 184 removed redundant mov
1127 // acc being incremented might cause problems
1131 ; Peephole 185 changed order of increment (acc incremented also!)
1156 ; Peephole 186.a optimized movc sequence
1188 ; Peephole 186.b optimized movc sequence
1213 ; Peephole 186.c optimized movc sequence
1222 // char indexed access to: char code table[] = {4,3,2,1};
1232 ; Peephole 186.d optimized movc sequence
1237 // char indexed access to: int code table[] = {4,3,2,1};
1252 ; Peephole 186.e optimized movc sequence (b, dptr differ)
1270 ; Peephole 187 used a instead of ar%1 for anl
1282 ; Peephole 188 removed redundant mov
1294 ; Peephole 189 removed redundant mov and anl
1299 // rules 190 & 191 need to be in order
1305 ; Peephole 190 removed redundant mov
1317 ; Peephole 191 removed redundant mov
1328 ; Peephole 192 used a instead of ar%1 as source
1345 ; Peephole 193.a optimized misc jump sequence
1355 } if labelRefCount %3 4
1369 ; Peephole 193.b optimized misc jump sequence
1379 } if labelRefCount %3 4
1393 ; Peephole 193.c optimized misc jump sequence
1403 } if labelRefCount %3 4
1414 ; Peephole 194 optimized misc jump sequence
1421 } if labelRefCount %3 4
1433 ; Peephole 195.a optimized misc jump sequence
1441 } if labelRefCount %3 3
1453 ; Peephole 195.b optimized misc jump sequence
1461 } if labelRefCount %3 3
1473 ; Peephole 195.c optimized misc jump sequence
1481 } if labelRefCount %3 3
1491 ; Peephole 196 optimized misc jump sequence
1497 } if labelRefCount %3 3
1507 ; Peephole 197.a optimized misc jump sequence
1513 } if labelRefCount %3 2
1523 ; Peephole 197.b optimized misc jump sequence
1529 } if labelRefCount %3 2
1539 ; Peephole 197.c optimized misc jump sequence
1545 } if labelRefCount %3 2
1554 ; Peephole 198 optimized misc jump sequence
1559 } if labelRefCount %3 2
1567 ; Peephole 199 optimized misc jump sequence
1571 } if labelRefCount %3 1
1577 ; Peephole 200 removed redundant sjmp
1586 ; Peephole 201 removed redundant sjmp
1596 ; Peephole 202 removed redundant push pop
1605 ; Peephole 203 removed mov r%1,_spx
1614 ; Peephole 204 removed redundant mov
1626 ; Peephole 205 optimized misc jump sequence
1630 } if labelRefCount %2 1
1635 ; Peephole 206 removed redundant mov %1,%1
1643 ; Peephole 207 removed zero add (acc not set to %1, flags undefined)
1652 ; Peephole 208 removed redundant push pop
1662 ; Peephole 209 optimized increment (acc not set to %1, flags undefined)
1668 mov dptr,#((((%1 >> 8)) <<8) + %1)
1670 ; Peephole 210 simplified expression
1678 ; Peephole 211 removed redundant push %1 pop %1
1686 ; Peephole 212 reduced add sequence to inc
1692 mov %1,#(( %2 >> 8 ) ^ 0x80)
1694 ; Peephole 213.a inserted fix
1700 mov %1,#(( %2 + %3 >> 8 ) ^ 0x80)
1702 ; Peephole 213.b inserted fix
1703 mov %1,#((%2 + %3) >> 8)
1713 ; Peephole 214 reduced some extra moves
1716 } if operandsNotSame
1723 ; Peephole 215 removed some moves
1726 } if operandsNotSame
1736 ; Peephole 216 simplified clear (2bytes)
1755 ; Peephole 217 simplified clear (3bytes)
1779 ; Peephole 218 simplified clear (4bytes)
1798 ; Peephole 219 removed redundant clear
1814 ; Peephole 219.a removed redundant clear
1827 ; Peephole 220.a removed bogus DPS set
1835 ; Peephole 220.b removed bogus DPS set
1840 mov %1 + %2,(%2 + %1)
1842 ; Peephole 221.a remove redundant move
1846 mov (%1 + %2 + %3),((%2 + %1) + %3)
1848 ; Peephole 221.b remove redundant move
1855 ; Peephole 222 removed dec/inc pair
1864 ; Peephole 223 removed redundant dph/dpl moves
1867 } if notVolatile %1 %2
1875 ; Peephole 224 removed redundant dph/dpl moves
1888 ; Peephole 225 removed redundant move to acc
1904 ; Peephole 226 removed unnecessary clr
1926 ; Peephole 227 replaced inefficient 32 bit clear
1954 ; Peephole 228 replaced inefficient 32 bit constant
1976 ; Peephole 229 replaced inefficient 16 bit clear
1994 ; Peephole 230 replaced inefficient 16 bit constant
2004 // this last peephole often removes the last mov from 227-230
2009 ; Peephole 231 removed redundant mov to dptr
2016 ; Peephole 232 using movc to read xdata (--xram-movc)
2024 ; Peephole 233 using _gptrgetc instead of _gptrget (--xram-movc)
2034 ; Peephole 234 loading dpl directly from a(ccumulator), r%1 not set
2047 ; Peephole 235 loading dph directly from a(ccumulator), r%1 not set
2054 // 14 rules by Fiorenzo D. Ramaglia <fd.ramaglia@tin.it>
2059 ; Peephole 236.a used r%1 instead of ar%1
2066 ; Peephole 236.b used r%1 instead of ar%1
2073 ; Peephole 236.c used r%1 instead of ar%1
2080 ; Peephole 236.d used r%1 instead of ar%1
2087 ; Peephole 236.e used r%1 instead of ar%1
2094 ; Peephole 236.f used r%1 instead of ar%1
2101 ; Peephole 236.g used r%1 instead of ar%1
2108 ; Peephole 236.h used r%1 instead of ar%1
2115 ; Peephole 236.i used r%1 instead of ar%1
2122 ; Peephole 236.j used r%1 instead of ar%1
2129 ; Peephole 236.k used r%1 instead of ar%1
2136 ; Peephole 236.l used r%1 instead of ar%1
2143 ; Peephole 236.m used r%1 instead of ar%1
2150 ; Peephole 236.n used r%1 instead of ar%1
2161 ; Peephole 237.a removed sjmp to ret
2178 ; Peephole 237.b removed sjmp to ret
2188 // applies to f.e. device/lib/log10f.c
2214 ; Peephole 238.a removed 4 redundant moves
2215 } if operandsNotSame8 %1 %2 %3 %4 %5 %6 %7 %8
2217 // applies to device/lib/log10f.c
2232 ; Peephole 238.b removed 3 redundant moves
2233 } if operandsNotSame7 %1 %2 %3 %4 %5 %6 %7
2235 // applies to f.e. device/lib/time.c
2251 ; Peephole 238.c removed 2 redundant moves
2252 } if operandsNotSame4 %1 %2 %3 %4
2254 // applies to f.e. support/regression/tests/bug-524209.c
2267 ; Peephole 238.d removed 3 redundant moves
2268 } if operandsNotSame6 %1 %2 %3 %4 %5 %6
2270 // applies to f.e. ser_ir.asm
2274 ; Peephole 239 used a instead of acc
2282 ; Peephole 240 use clr instead of addc a,#0
2287 // peepholes 241.a to 241.c and 241.d to 241.f need to be in order
2299 ; Peephole 241.a optimized compare
2310 // applies to f.e. time.c
2320 ; Peephole 241.b optimized compare
2329 // applies to f.e. malloc.c
2338 ; Peephole 241.c optimized compare
2346 // applies to f.e. j = (k!=0x1000);
2347 // with volatile idata long k;
2362 ; Peephole 241.d optimized compare
2376 // applies to f.e. j = (k!=0x1000);
2377 // with volatile idata int k;
2388 ; Peephole 241.e optimized compare
2398 // applies to f.e. vprintf.asm (--stack-auto)
2407 ; Peephole 241.f optimized compare
2415 // applies to f.e. scott-bool1.c
2422 ; Peephole 242.a avoided branch jnz to jz
2427 } if labelRefCount %1 1
2429 // applies to f.e. scott-bool1.c
2437 ; Peephole 242.b avoided branch jnz to jz
2443 } if labelRefCount %1 1
2445 // applies to f.e. logic.c
2455 ; Peephole 242.c avoided branch jnz to jz
2463 } if labelRefCount %1 1
2465 // applies to f.e. vprintf.c
2466 // this is a rare case, usually the "tail increment" is noticed earlier
2473 ; Peephole 243 avoided branch to sjmp
2480 // applies to f.e. simplefloat.c (saving 1 cycle)
2485 ; Peephole 244.a moving first to a instead of r%1
2490 // applies to f.e. _itoa.c (saving 1 cycle)
2495 ; Peephole 244.b moving first to a instead of r%1
2501 // applies to f.e. bug-460010.c (saving 1 cycle)
2506 ; Peephole 244.c loading dpl from a instead of r%1
2515 ; Peephole 244.d loading dph from a instead of r%1
2520 // this one is safe but disables 245.a 245.b
2521 // please remove 245 if 245.a 245.b are found to be safe
2522 // applies to f.e. scott-compare.c
2533 ; Peephole 245 optimized complement (r%1 and acc set needed?)
2538 } if labelRefCount %2 1
2540 // this one will not be triggered if 245 is present
2541 // please remove 245 if 245.a 245.b are found to be safe
2542 // applies to f.e. vprintf.c
2554 ; Peephole 245.a optimized conditional jump (r%1 and acc not set!)
2556 } if labelRefCount %2 1
2558 // this one will not be triggered if 245 is present
2559 // please remove 245 if 245.a 245.b are found to be safe
2560 // applies to f.e. scott-compare.c
2572 ; Peephole 245.b optimized conditional jump (r%1 and acc not set!)
2574 } if labelRefCount %2 1
2577 // rules 246.x apply to f.e. bitfields.c
2588 ; Peephole 246.a combined clr/clr
2605 ; Peephole 246.b combined set/set
2622 ; Peephole 246.c combined set/clr
2640 ; Peephole 246.d combined clr/set
2659 ; Peephole 246.e combined set/clr/clr
2678 ; Peephole 246.f combined set/clr/set
2698 ; Peephole 246.g combined clr/set/clr
2718 ; Peephole 246.h combined clr/set/set
2729 // rules 247.x apply to f.e. bitfields.c
2740 ; Peephole 247.a combined clr/clr
2757 ; Peephole 247.b combined set/set
2774 ; Peephole 247.c combined set/clr
2792 ; Peephole 247.d combined clr/set
2811 ; Peephole 247.e combined set/clr/clr
2830 ; Peephole 247.f combined set/clr/set
2850 ; Peephole 247.g combined clr/set/clr
2870 ; Peephole 247.h combined clr/set/set
2879 // Peepholes 248.x have to be compatible with the keyword volatile.
2880 // They optimize typical accesses to memory mapped I/O devices:
2881 // volatile xdata char t; t|=0x01;
2891 ; Peephole 248.a optimized or to xdata
2899 // volatile xdata char t; t&=0x01;
2909 ; Peephole 248.b optimized and to xdata
2917 // volatile xdata char t; t^=0x01;
2927 ; Peephole 248.c optimized xor to xdata
2935 // volatile xdata char t; t|=0x01; t&=~0x01; t|=0x01;
2955 ; Peephole 248.d optimized or/and/or to volatile xdata
2969 // volatile xdata char t; t&=~0x01; t|=0x01; t&=~0x01;
2989 ; Peephole 248.e optimized and/or/and to volatile xdata
3003 // volatile xdata char t; t|=0x01; t&=~0x01;
3017 ; Peephole 248.f optimized or/and to volatile xdata
3028 // volatile xdata char t; t&=~0x01; t|=0x01;
3042 ; Peephole 248.g optimized and/or to volatile xdata
3053 // volatile xdata char t; t^=0x01; t^=0x01;
3067 ; Peephole 248.h optimized xor/xor to volatile xdata
3082 ; Peephole 249a jump optimization
3083 } if labelRefCount %1 1
3089 ; Peephole 249b jump optimization
3090 } if labelRefCount %1 1
3093 // This allows non-interrupt and interrupt code to safely compete
3094 // for a resource without the non-interrupt code having to disable
3096 // volatile bit resource_is_free;
3097 // if( resource_is_free ) {
3098 // resource_is_free=0; do_something; resource_is_free=1;
3105 ; Peephole 250.a using atomic test and clear
3109 } if labelRefCount %3 0
3117 ; Peephole 250.b using atomic test and clear
3121 } if labelRefCount %2 1
3124 // not before peephole 250.b
3128 ; Peephole 251 replaced ljmp to ret with ret
3130 } if labelIsReturnOnly