5 // ; Peephole 1 removed pop %1 push %1 (not push pop)
13 // ; Peephole 2 removed pop %1 push %1 (not push pop)
18 // added by Jean Louis VERN for
24 ; Peephole 2.a removed redundant xch xch
32 ; Peephole 3.a changed mov to clr
42 ; Peephole 3.b changed mov to clr
48 // saving 1 byte, losing 1 cycle but maybe allowing peephole 3.b to start
53 ; Peephole 3.c changed mov to clr
66 ; Peephole 100 removed redundant mov
74 ; Peephole 100.a removed redundant mov
84 ; Peephole 101 removed redundant mov
102 ; Peephole 102 removed redundant mov
124 ; Peephole 103 removed redundant mov
141 ; Peephole 104 optimized increment (acc not set to r%1, flags undefined)
150 ; Peephole 105 removed redundant mov
159 ; Peephole 106 removed redundant mov
168 ; Peephole 107 removed redundant ljmp
177 ; Peephole 108 removed ljmp by inverse jump logic
187 ; Peephole 109 removed ljmp by inverse jump logic
197 ; Peephole 110 removed ljmp by inverse jump logic
207 ; Peephole 111 removed ljmp by inverse jump logic
217 ; Peephole 112 removed ljmp by inverse jump logic
226 ; Peephole 244 replaced ljmp to ret with ret
228 } if labelIsReturnOnly
235 ; Peephole 132 changed ljmp to sjmp
249 ; Peephole 113 optimized misc sequence
254 } if labelRefCount %3 1
265 ; Peephole 114 optimized misc sequence
271 } if labelRefCount %3 2
280 ; Peephole 115 jump optimization
284 } if labelRefCount %3 1
294 ; Peephole 116 jump optimization
299 } if labelRefCount %3 2
310 ; Peephole 117 jump optimization
316 } if labelRefCount %3 3
328 ; Peephole 118 jump optimization
335 } if labelRefCount %3 4
344 ; Peephole 119 jump optimization
347 } if labelRefCount %3 1
357 ; Peephole 120 jump optimization
361 } if labelRefCount %3 2
372 ; Peephole 121 jump optimization
377 } if labelRefCount %3 3
389 ; Peephole 122 jump optimization
395 } if labelRefCount %3 4
404 ; Peephole 123 jump optimization
408 } if labelRefCount %3 1
418 ; Peephole 124 jump optimization
423 } if labelRefCount %3 2
434 ; Peephole 125 jump optimization
440 } if labelRefCount %3 3
452 ; Peephole 126 jump optimization
459 } if labelRefCount %3 4
472 ; Peephole 127 removed misc sequence
474 } if labelRefCount %3 0
481 ; Peephole 128 jump optimization
490 ; Peephole 129 jump optimization
497 ; Peephole 130 changed target address mode r%1 to ar%1
507 ; Peephole 131 optimized decrement (not caring for c)
519 ; Peephole 133 removed redundant moves
532 ; Peephole 134 removed redundant moves
543 ; Peephole 135 removed redundant mov
554 ; Peephole 136 removed redundant moves
560 // WTF? Doesn't look sensible to me...
570 // ; Peephole 137 optimized misc jump sequence
574 //} if labelRefCount %4 1
585 // ; Peephole 138 optimized misc jump sequence
590 //} if labelRefCount %4 1
597 ; Peephole 139 removed redundant mov
607 ; Peephole 140 removed redundant mov
616 ; Peephole 141 removed redundant mov
626 ; Peephole 142 removed redundant moves
635 ; Peephole 143 converted rlc to rl
643 ; Peephole 144 converted rrc to rr
651 ; Peephole 145 changed to add without carry
660 ; Peephole 146 changed to add without carry
668 ; Peephole 147 changed target address mode r%1 to ar%1
675 ; Peephole 148 changed target address mode r%1 to ar%1
682 ; Peephole 149 changed target address mode r%1 to ar%1
692 ; Peephole 150 removed misc moves via dpl before return
705 ; Peephole 151 removed misc moves via dph, dpl before return
717 ; Peephole 152 removed misc moves via dph, dpl before return
732 ; Peephole 153 removed misc moves via dph, dpl, b before return
745 ; Peephole 154 removed misc moves via dph, dpl, b before return
759 ; Peephole 155 removed misc moves via dph, dpl, b before return
776 ; Peephole 156 removed misc moves via dph, dpl, b, a before return
791 ; Peephole 157 removed misc moves via dph, dpl, b, a before return
805 ; Peephole 158 removed misc moves via dph, dpl, b, a before return
814 ; Peephole 159 avoided xrl during execution
823 ; Peephole 160 removed sjmp by inverse jump logic
833 ; Peephole 161 removed sjmp by inverse jump logic
843 ; Peephole 162 removed sjmp by inverse jump logic
853 ; Peephole 163 removed sjmp by inverse jump logic
863 ; Peephole 164 removed sjmp by inverse jump logic
873 ; Peephole 165 removed sjmp by inverse jump logic
883 ; Peephole 166 removed redundant mov
893 ; Peephole 167 removed redundant bit moves (c not set to %1)
902 ; Peephole 168 jump optimization
912 ; Peephole 169 jump optimization
924 ; Peephole 170 jump optimization
927 } if labelRefCount %3 1
937 ; Peephole 171 jump optimization
941 } if labelRefCount %3 2
952 ; Peephole 172 jump optimization
957 } if labelRefCount %3 3
969 ; Peephole 173 jump optimization
975 } if labelRefCount %3 4
984 ; Peephole 174 optimized decrement (acc not set to %2, flags undefined)
995 ; Peephole 175 optimized increment (acc not set to %2, flags undefined)
1005 ; Peephole 176 optimized increment, removed redundant mov
1010 // this one will screw assignments to volatile/sfr's
1015 // ; Peephole 177 removed redundant mov
1019 // applies to f.e. scott-add.asm (--model-large)
1024 ; Peephole 177 removed redundant mov
1033 ; Peephole 178 removed redundant mov
1038 // rules 179-182 provided by : Frieder <fe@lake.iup.uni-heidelberg.de>
1039 // saving 2 byte, 1 cycle
1044 ; Peephole 179 changed mov to clr
1049 // saving 1 byte, 0 cycles
1053 ; Peephole 180 changed mov to clr
1057 // saving 3 bytes, 2 cycles
1058 // provided by Bernhard Held <bernhard.held@de.westinghouse.com>
1063 ; Peephole 182.a used 16 bit load of DPTR
1067 // saving 3 byte, 2 cycles, return(NULL) profits here
1072 ; Peephole 182.b used 16 bit load of dptr
1076 // saving 3 byte, 2 cycles. Probably obsoleted by 182.b
1081 ; Peephole 182.c used 16 bit load of dptr
1082 mov dptr,#(((%2)<<8) + %1)
1089 ; Peephole 183 avoided anl during execution
1098 ; Peephole 184 removed redundant mov
1104 // acc being incremented might cause problems
1108 ; Peephole 185 changed order of increment (acc incremented also!)
1133 ; Peephole 186.a optimized movc sequence
1165 ; Peephole 186.b optimized movc sequence
1190 ; Peephole 186.c optimized movc sequence
1208 ; Peephole 186.d optimized movc sequence
1218 ; Peephole 187 used a instead of ar%1 for anl
1230 ; Peephole 188 removed redundant mov
1242 ; Peephole 189 removed redundant mov and anl
1247 // rules 190 & 191 need to be in order
1253 ; Peephole 190 removed redundant mov
1265 ; Peephole 191 removed redundant mov
1276 ; Peephole 192 used a instead of ar%1 as source
1293 ; Peephole 193.a optimized misc jump sequence
1303 } if labelRefCount %3 4
1317 ; Peephole 193.b optimized misc jump sequence
1327 } if labelRefCount %3 4
1341 ; Peephole 193.c optimized misc jump sequence
1351 } if labelRefCount %3 4
1362 ; Peephole 194 optimized misc jump sequence
1369 } if labelRefCount %3 4
1381 ; Peephole 195.a optimized misc jump sequence
1389 } if labelRefCount %3 3
1401 ; Peephole 195.b optimized misc jump sequence
1409 } if labelRefCount %3 3
1421 ; Peephole 195.c optimized misc jump sequence
1429 } if labelRefCount %3 3
1439 ; Peephole 196 optimized misc jump sequence
1445 } if labelRefCount %3 3
1455 ; Peephole 197.a optimized misc jump sequence
1461 } if labelRefCount %3 2
1471 ; Peephole 197.b optimized misc jump sequence
1477 } if labelRefCount %3 2
1487 ; Peephole 197.c optimized misc jump sequence
1493 } if labelRefCount %3 2
1502 ; Peephole 198 optimized misc jump sequence
1507 } if labelRefCount %3 2
1515 ; Peephole 199 optimized misc jump sequence
1519 } if labelRefCount %3 1
1525 ; Peephole 200 removed redundant sjmp
1534 ; Peephole 201 removed redundant sjmp
1544 ; Peephole 202 removed redundant push pop
1553 ; Peephole 203 removed mov r%1,_spx
1562 ; Peephole 204 removed redundant mov
1574 ; Peephole 205 optimized misc jump sequence
1578 } if labelRefCount %2 1
1583 ; Peephole 206 removed redundant mov %1,%1
1591 ; Peephole 207 removed zero add (acc not set to %1, flags undefined)
1600 ; Peephole 208 removed redundant push pop
1610 ; Peephole 209 optimized increment (acc not set to %1, flags undefined)
1616 mov dptr,#((((%1 >> 8)) <<8) + %1)
1618 ; Peephole 210 simplified expression
1626 ; Peephole 211 removed redundant push %1 pop %1
1634 ; Peephole 212 reduced add sequence to inc
1640 mov %1,#(( %2 >> 8 ) ^ 0x80)
1642 ; Peephole 213.a inserted fix
1648 mov %1,#(( %2 + %3 >> 8 ) ^ 0x80)
1650 ; Peephole 213.b inserted fix
1651 mov %1,#((%2 + %3) >> 8)
1660 ; Peephole 214 reduced some extra moves
1663 } if operandsNotSame
1670 ; Peephole 215 removed some moves
1673 } if operandsNotSame
1683 ; Peephole 216 simplified clear (2bytes)
1702 ; Peephole 217 simplified clear (3bytes)
1726 ; Peephole 218 simplified clear (4bytes)
1745 ; Peephole 219 removed redundant clear
1761 ; Peephole 219.a removed redundant clear
1774 ; Peephole 220.a removed bogus DPS set
1782 ; Peephole 220.b removed bogus DPS set
1787 mov %1 + %2,(%2 + %1)
1789 ; Peephole 221.a remove redundant move
1793 mov (%1 + %2 + %3),((%2 + %1) + %3)
1795 ; Peephole 221.b remove redundant move
1802 ; Peephole 222 removed dec/inc pair
1811 ; Peephole 223 removed redundant dph/dpl moves
1822 ; Peephole 224 removed redundant dph/dpl moves
1835 ; Peephole 225 removed redundant move to acc
1851 ; Peephole 226 removed unnecessary clr
1873 ; Peephole 227 replaced inefficient 32 bit clear
1901 ; Peephole 228 replaced inefficient 32 constant
1923 ; Peephole 229 replaced inefficient 16 bit clear
1941 ; Peephole 230 replaced inefficient 16 constant
1951 // this last peephole often removes the last mov from 227-230
1956 ; Peephole 231 removed redundant mov to dptr
1963 ; Peephole 232 using movc to read xdata (--xram-movc)
1971 ; Peephole 233 using _gptrgetc instead of _gptrget (--xram-movc)
1981 ; Peephole 234 loading dpl directly from a(ccumulator), r%1 not set
1994 ; Peephole 235 loading dph directly from a(ccumulator), r%1 not set
2001 // 14 rules by Fiorenzo D. Ramaglia <fd.ramaglia@tin.it>
2006 ; Peephole 236.a used r%1 instead of ar%1
2013 ; Peephole 236.b used r%1 instead of ar%1
2020 ; Peephole 236.c used r%1 instead of ar%1
2027 ; Peephole 236.d used r%1 instead of ar%1
2034 ; Peephole 236.e used r%1 instead of ar%1
2041 ; Peephole 236.f used r%1 instead of ar%1
2048 ; Peephole 236.g used r%1 instead of ar%1
2055 ; Peephole 236.h used r%1 instead of ar%1
2062 ; Peephole 236.i used r%1 instead of ar%1
2069 ; Peephole 236.j used r%1 instead of ar%1
2076 ; Peephole 236.k used r%1 instead of ar%1
2083 ; Peephole 236.l used r%1 instead of ar%1
2090 ; Peephole 236.m used r%1 instead of ar%1
2097 ; Peephole 236.n used r%1 instead of ar%1
2108 ; Peephole 237.a removed sjmp to ret
2125 ; Peephole 237.b removed sjmp to ret
2135 // applies to f.e. device/lib/log10f.c
2161 ; Peephole 238.a removed 4 redundant moves
2162 } if operandsNotSame8 %1 %2 %3 %4 %5 %6 %7 %8
2164 // applies to device/lib/log10f.c
2179 ; Peephole 238.b removed 3 redundant moves
2180 } if operandsNotSame7 %1 %2 %3 %4 %5 %6 %7
2182 // applies to f.e. device/lib/time.c
2198 ; Peephole 238.c removed 2 redundant moves
2199 } if operandsNotSame4 %1 %2 %3 %4
2201 // applies to f.e. support/regression/tests/bug-524209.c
2214 ; Peephole 238.d removed 3 redundant moves
2215 } if operandsNotSame6 %1 %2 %3 %4 %5 %6
2217 // applies to f.e. ser_ir.asm
2221 ; Peephole 239 used a instead of acc
2229 ; Peephole 240 use clr instead of addc a,#0
2234 // peepholes 241.a to 241.c and 241.d to 241.f need to be in order
2246 ; Peephole 241.a optimized compare
2257 // applies to f.e. time.c
2267 ; Peephole 241.b optimized compare
2276 // applies to f.e. malloc.c
2285 ; Peephole 241.c optimized compare
2293 // applies to f.e. j = (k!=0x1000);
2294 // with volatile idata long k;
2309 ; Peephole 241.d optimized compare
2323 // applies to f.e. j = (k!=0x1000);
2324 // with volatile idata int k;
2335 ; Peephole 241.e optimized compare
2345 // applies to f.e. vprintf.asm (--stack-auto)
2354 ; Peephole 241.f optimized compare
2362 // applies to f.e. scott-bool1.c
2369 ; Peephole 242.a avoided branch jnz to jz
2374 } if labelRefCount %1 1
2376 // applies to f.e. scott-bool1.c
2384 ; Peephole 242.b avoided branch jnz to jz
2390 } if labelRefCount %1 1
2392 // applies to f.e. logic.c
2402 ; Peephole 242.c avoided branch jnz to jz
2410 } if labelRefCount %1 1
2412 // applies to f.e. vprintf.c
2413 // this is a rare case, usually the "tail increment" is noticed earlier
2420 ; Peephole 243 avoided branch to sjmp
2427 // applies to f.e. simplefloat.c (saving 1 cycle)
2432 ; Peephole 244.a moving first to a instead of r%1
2437 // applies to f.e. _itoa.c (saving 1 cycle)
2442 ; Peephole 244.b moving first to a instead of r%1
2448 // applies to f.e. bug-460010.c (saving 1 cycle)
2453 ; Peephole 244.c loading dpl from a instead of r%1
2462 ; Peephole 244.d loading dph from a instead of r%1
2467 // this one is safe but disables 245.a 245.b
2468 // please remove 245 if 245.a 245.b are found to be safe
2469 // applies to f.e. scott-compare.c
2480 ; Peephole 245 optimized complement (r%1 and acc set needed?)
2485 } if labelRefCount %2 1
2487 // this one will not be triggered if 245 is present
2488 // please remove 245 if 245.a 245.b are found to be safe
2489 // applies to f.e. vprintf.c
2501 ; Peephole 245.a optimized conditional jump (r%1 and acc not set!)
2503 } if labelRefCount %2 1
2505 // this one will not be triggered if 245 is present
2506 // please remove 245 if 245.a 245.b are found to be safe
2507 // applies to f.e. scott-compare.c
2519 ; Peephole 245.b optimized conditional jump (r%1 and acc not set!)
2521 } if labelRefCount %2 1