5 // ; Peephole 1 removed pop %1 push %1 (not push pop)
13 // ; Peephole 2 removed pop %1 push %1 (not push pop)
18 // added by Jean Louis VERN for
24 ; Peephole 2.a removed redundant xch xch
32 ; Peephole 3.a changed mov to clr
42 ; Peephole 3.b changed mov to clr
48 // saving 1 byte, losing 1 cycle but maybe allowing peephole 3.b to start
53 ; Peephole 3.c changed mov to clr
69 ; Peephole 3.d removed redundant clr
83 ; Peephole 3.e removed redundant clr
99 ; Peephole 3.f removed redundant clr
117 ; Peephole 3.g removed redundant clr
126 ; Peephole 100 removed redundant mov
139 ; Peephole 101 removed redundant mov
157 ; Peephole 102 removed redundant mov
179 ; Peephole 103 removed redundant mov
196 ; Peephole 104 optimized increment (acc not set to r%1, flags undefined)
205 ; Peephole 105 removed redundant mov
214 ; Peephole 106 removed redundant mov
223 ; Peephole 107 removed redundant ljmp
232 ; Peephole 108 removed ljmp by inverse jump logic
242 ; Peephole 109 removed ljmp by inverse jump logic
252 ; Peephole 110 removed ljmp by inverse jump logic
262 ; Peephole 111 removed ljmp by inverse jump logic
272 ; Peephole 112.a removed ljmp by inverse jump logic
281 ; Peephole 112.b changed ljmp to sjmp
294 ; Peephole 113 optimized misc sequence
299 } if labelRefCount %3 1
310 ; Peephole 114 optimized misc sequence
316 } if labelRefCount %3 2
325 ; Peephole 115.a jump optimization
329 } if labelRefCount %3 1
337 ; Peephole 115.b jump optimization
351 ; Peephole 116 jump optimization
356 } if labelRefCount %3 2
367 ; Peephole 117 jump optimization
373 } if labelRefCount %3 3
385 ; Peephole 118 jump optimization
392 } if labelRefCount %3 4
401 ; Peephole 119 jump optimization
404 } if labelRefCount %3 1
414 ; Peephole 120 jump optimization
418 } if labelRefCount %3 2
429 ; Peephole 121 jump optimization
434 } if labelRefCount %3 3
446 ; Peephole 122 jump optimization
452 } if labelRefCount %3 4
461 ; Peephole 123 jump optimization
465 } if labelRefCount %3 1
475 ; Peephole 124 jump optimization
480 } if labelRefCount %3 2
491 ; Peephole 125 jump optimization
497 } if labelRefCount %3 3
509 ; Peephole 126 jump optimization
516 } if labelRefCount %3 4
529 ; Peephole 127 removed misc sequence
531 } if labelRefCount %3 0
538 ; Peephole 128 jump optimization
547 ; Peephole 129 jump optimization
554 ; Peephole 130 changed target address mode r%1 to ar%1
564 ; Peephole 131 optimized decrement (not caring for c)
576 ; Peephole 133 removed redundant moves
589 ; Peephole 134 removed redundant moves
600 ; Peephole 135 removed redundant mov
611 ; Peephole 136 removed redundant moves
617 // WTF? Doesn't look sensible to me...
627 // ; Peephole 137 optimized misc jump sequence
631 //} if labelRefCount %4 1
642 // ; Peephole 138 optimized misc jump sequence
647 //} if labelRefCount %4 1
654 ; Peephole 139 removed redundant mov
664 ; Peephole 140 removed redundant mov
673 ; Peephole 141 removed redundant mov
683 ; Peephole 142 removed redundant moves
692 ; Peephole 143 converted rlc to rl
700 ; Peephole 144 converted rrc to rr
708 ; Peephole 145 changed to add without carry
717 ; Peephole 146 changed to add without carry
725 ; Peephole 147 changed target address mode r%1 to ar%1
732 ; Peephole 148 changed target address mode r%1 to ar%1
739 ; Peephole 149 changed target address mode r%1 to ar%1
749 ; Peephole 150 removed misc moves via dpl before return
762 ; Peephole 151 removed misc moves via dph, dpl before return
774 ; Peephole 152 removed misc moves via dph, dpl before return
789 ; Peephole 153 removed misc moves via dph, dpl, b before return
802 ; Peephole 154 removed misc moves via dph, dpl, b before return
816 ; Peephole 155 removed misc moves via dph, dpl, b before return
833 ; Peephole 156 removed misc moves via dph, dpl, b, a before return
848 ; Peephole 157 removed misc moves via dph, dpl, b, a before return
862 ; Peephole 158 removed misc moves via dph, dpl, b, a before return
871 ; Peephole 159 avoided xrl during execution
880 ; Peephole 160 removed sjmp by inverse jump logic
890 ; Peephole 161 removed sjmp by inverse jump logic
900 ; Peephole 162 removed sjmp by inverse jump logic
910 ; Peephole 163 removed sjmp by inverse jump logic
920 ; Peephole 164 removed sjmp by inverse jump logic
930 ; Peephole 165 removed sjmp by inverse jump logic
940 ; Peephole 166 removed redundant mov
943 } if notVolatile %1 %2
950 ; Peephole 167 removed redundant bit moves (c not set to %1)
959 ; Peephole 168 jump optimization
969 ; Peephole 169 jump optimization
981 ; Peephole 170 jump optimization
984 } if labelRefCount %3 1
994 ; Peephole 171 jump optimization
998 } if labelRefCount %3 2
1009 ; Peephole 172 jump optimization
1014 } if labelRefCount %3 3
1026 ; Peephole 173 jump optimization
1032 } if labelRefCount %3 4
1041 ; Peephole 174 optimized decrement (acc not set to %2, flags undefined)
1052 ; Peephole 175 optimized increment (acc not set to %2, flags undefined)
1062 ; Peephole 176 optimized increment, removed redundant mov
1067 // this one will screw up assignments to volatile/sfr's
1072 ; Peephole 177.a removed redundant mov
1074 } if notVolatile %1 %2
1076 // applies to f.e. scott-add.asm (--model-large)
1081 ; Peephole 177.b removed redundant mov
1085 // applies to f.e. bug-408972.c
1090 ; Peephole 177.c removed redundant move
1092 } if notVolatile %1 %2
1094 // applies to f.e. bug-408972.c
1095 // not before peephole 177.c
1101 ; Peephole 177.d removed redundant move
1104 } if notVolatile(%1 %2),operandsNotRelated(%1 %3)
1106 // applies to f.e. bug-607243.c
1107 // also check notVolatile %3, as it will return FALSE if it's @r%1
1112 ; peephole 177.e removed redundant move
1114 } if notVolatile %2 %3
1120 ; peephole 177.f removed redundant move
1128 ; peephole 177.g optimized mov sequence
1137 ; peephole 177.h optimized mov sequence
1147 ; Peephole 178 removed redundant mov
1152 // rules 179-182 provided by : Frieder <fe@lake.iup.uni-heidelberg.de>
1153 // saving 2 byte, 1 cycle
1158 ; Peephole 179 changed mov to clr
1164 // volatile xdata char t; t=0x01; t=0x03;
1171 ; Peephole 180.a removed redundant mov to dptr
1177 // volatile xdata char t; t=0x01; t=0x03; t=0x01;
1186 ; Peephole 180.b removed redundant mov to dptr
1194 // saving 1 byte, 0 cycles
1198 ; Peephole 181 changed mov to clr
1202 // saving 3 bytes, 2 cycles
1203 // provided by Bernhard Held <bernhard.held@de.westinghouse.com>
1208 ; Peephole 182.a used 16 bit load of DPTR
1212 // saving 3 byte, 2 cycles, return(NULL) profits here
1217 ; Peephole 182.b used 16 bit load of dptr
1221 // saving 3 byte, 2 cycles. Probably obsoleted by 182.b
1226 ; Peephole 182.c used 16 bit load of dptr
1227 mov dptr,#(((%2)<<8) + %1)
1230 // applies to return 0.0; in f.e. sincosf.c
1236 ; Peephole 182.d used 16 bit load of dptr
1237 mov dptr,#(%1&0x00ff)
1245 ; Peephole 183 avoided anl during execution
1254 ; Peephole 184 removed redundant mov
1260 // acc being incremented might cause problems
1264 ; Peephole 185 changed order of increment (acc incremented also!)
1289 ; Peephole 186.a optimized movc sequence
1322 ; Peephole 186.b optimized movc sequence
1347 ; Peephole 186.c optimized movc sequence
1356 // char indexed access to: char code table[] = {4,3,2,1};
1366 ; Peephole 186.d optimized movc sequence
1371 // char indexed access to: int code table[] = {4,3,2,1};
1386 ; Peephole 186.e optimized movc sequence (b, dptr differ)
1404 ; Peephole 187 used a instead of ar%1 for anl
1416 ; Peephole 188 removed redundant mov
1428 ; Peephole 189 removed redundant mov and anl
1433 // rules 190 & 191 need to be in order
1439 ; Peephole 190 removed redundant mov
1451 ; Peephole 191 removed redundant mov
1462 ; Peephole 192 used a instead of ar%1 as source
1479 ; Peephole 193.a optimized misc jump sequence
1489 } if labelRefCount %3 4
1503 ; Peephole 193.b optimized misc jump sequence
1513 } if labelRefCount %3 4
1527 ; Peephole 193.c optimized misc jump sequence
1537 } if labelRefCount %3 4
1548 ; Peephole 194 optimized misc jump sequence
1555 } if labelRefCount %3 4
1567 ; Peephole 195.a optimized misc jump sequence
1575 } if labelRefCount %3 3
1587 ; Peephole 195.b optimized misc jump sequence
1595 } if labelRefCount %3 3
1607 ; Peephole 195.c optimized misc jump sequence
1615 } if labelRefCount %3 3
1625 ; Peephole 196 optimized misc jump sequence
1631 } if labelRefCount %3 3
1641 ; Peephole 197.a optimized misc jump sequence
1647 } if labelRefCount %3 2
1657 ; Peephole 197.b optimized misc jump sequence
1663 } if labelRefCount %3 2
1673 ; Peephole 197.c optimized misc jump sequence
1679 } if labelRefCount %3 2
1688 ; Peephole 198 optimized misc jump sequence
1693 } if labelRefCount %3 2
1701 ; Peephole 199 optimized misc jump sequence
1705 } if labelRefCount %3 1
1711 ; Peephole 200 removed redundant sjmp
1720 ; Peephole 201 removed redundant sjmp
1730 ; Peephole 202 removed redundant push pop
1739 ; Peephole 203 removed mov r%1,_spx
1748 ; Peephole 204 removed redundant mov
1760 ; Peephole 205 optimized misc jump sequence
1764 } if labelRefCount %2 1
1769 ; Peephole 206 removed redundant mov %1,%1
1777 ; Peephole 207 removed zero add (acc not set to %1, flags undefined)
1786 ; Peephole 208 removed redundant push pop
1796 ; Peephole 209 optimized increment (acc not set to %1, flags undefined)
1802 mov dptr,#((((%1 >> 8)) <<8) + %1)
1804 ; Peephole 210 simplified expression
1812 ; Peephole 211 removed redundant push %1 pop %1
1820 ; Peephole 212 reduced add sequence to inc
1826 mov %1,#(( %2 >> 8 ) ^ 0x80)
1828 ; Peephole 213.a inserted fix
1834 mov %1,#(( %2 + %3 >> 8 ) ^ 0x80)
1836 ; Peephole 213.b inserted fix
1837 mov %1,#((%2 + %3) >> 8)
1847 ; Peephole 214 reduced some extra moves
1850 } if operandsNotSame
1857 ; Peephole 215 removed some moves
1860 } if operandsNotSame
1870 ; Peephole 216 simplified clear (2bytes)
1889 ; Peephole 217 simplified clear (3bytes)
1913 ; Peephole 218 simplified clear (4bytes)
1932 ; Peephole 219 removed redundant clear
1948 ; Peephole 219.a removed redundant clear
1961 ; Peephole 220.a removed bogus DPS set
1969 ; Peephole 220.b removed bogus DPS set
1974 mov %1 + %2,(%2 + %1)
1976 ; Peephole 221.a remove redundant move
1980 mov (%1 + %2 + %3),((%2 + %1) + %3)
1982 ; Peephole 221.b remove redundant move
1989 ; Peephole 222 removed dec/inc pair
1998 ; Peephole 223 removed redundant dph/dpl moves
2001 } if notVolatile %1 %2
2009 ; Peephole 224 removed redundant dph/dpl moves
2022 ; Peephole 225 removed redundant move to acc
2038 ; Peephole 226 removed unnecessary clr
2060 ; Peephole 227 replaced inefficient 32 bit clear
2088 ; Peephole 228 replaced inefficient 32 bit constant
2110 ; Peephole 229 replaced inefficient 16 bit clear
2128 ; Peephole 230 replaced inefficient 16 bit constant
2138 // this last peephole often removes the last mov from 227-230
2143 ; Peephole 231 removed redundant mov to dptr
2150 ; Peephole 232 using movc to read xdata (--xram-movc)
2158 ; Peephole 233 using _gptrgetc instead of _gptrget (--xram-movc)
2168 ; Peephole 234 loading dpl directly from a(ccumulator), r%1 not set
2181 ; Peephole 235 loading dph directly from a(ccumulator), r%1 not set
2188 // 14 rules by Fiorenzo D. Ramaglia <fd.ramaglia@tin.it>
2193 ; Peephole 236.a used r%1 instead of ar%1
2200 ; Peephole 236.b used r%1 instead of ar%1
2207 ; Peephole 236.c used r%1 instead of ar%1
2214 ; Peephole 236.d used r%1 instead of ar%1
2221 ; Peephole 236.e used r%1 instead of ar%1
2228 ; Peephole 236.f used r%1 instead of ar%1
2235 ; Peephole 236.g used r%1 instead of ar%1
2242 ; Peephole 236.h used r%1 instead of ar%1
2249 ; Peephole 236.i used r%1 instead of ar%1
2256 ; Peephole 236.j used r%1 instead of ar%1
2263 ; Peephole 236.k used r%1 instead of ar%1
2270 ; Peephole 236.l used r%1 instead of ar%1
2277 ; Peephole 236.m used r%1 instead of ar%1
2284 ; Peephole 236.n used r%1 instead of ar%1
2295 ; Peephole 237.a removed sjmp to ret
2312 ; Peephole 237.b removed sjmp to ret
2322 // applies to f.e. device/lib/log10f.c
2348 ; Peephole 238.a removed 4 redundant moves
2349 } if operandsNotSame8 %1 %2 %3 %4 %5 %6 %7 %8
2351 // applies to device/lib/log10f.c
2366 ; Peephole 238.b removed 3 redundant moves
2367 } if operandsNotSame7 %1 %2 %3 %4 %5 %6 %7
2369 // applies to f.e. device/lib/time.c
2385 ; Peephole 238.c removed 2 redundant moves
2386 } if operandsNotSame4 %1 %2 %3 %4
2388 // applies to f.e. support/regression/tests/bug-524209.c
2401 ; Peephole 238.d removed 3 redundant moves
2402 } if operandsNotSame6 %1 %2 %3 %4 %5 %6
2404 // applies to f.e. ser_ir.asm
2408 ; Peephole 239 used a instead of acc
2416 ; Peephole 240 use clr instead of addc a,#0
2421 // peepholes 241.a to 241.c and 241.d to 241.f need to be in order
2433 ; Peephole 241.a optimized compare
2444 // applies to f.e. time.c
2454 ; Peephole 241.b optimized compare
2463 // applies to f.e. malloc.c
2472 ; Peephole 241.c optimized compare
2480 // applies to f.e. j = (k!=0x1000);
2481 // with volatile idata long k;
2496 ; Peephole 241.d optimized compare
2510 // applies to f.e. j = (k!=0x1000);
2511 // with volatile idata int k;
2522 ; Peephole 241.e optimized compare
2532 // applies to f.e. vprintf.asm (--stack-auto)
2541 ; Peephole 241.f optimized compare
2549 // applies to f.e. scott-bool1.c
2556 ; Peephole 242.a avoided branch jnz to jz
2561 } if labelRefCount %1 1
2563 // applies to f.e. scott-bool1.c
2571 ; Peephole 242.b avoided branch jnz to jz
2577 } if labelRefCount %1 1
2579 // applies to f.e. logic.c
2589 ; Peephole 242.c avoided branch jnz to jz
2597 } if labelRefCount %1 1
2599 // applies to f.e. vprintf.c
2600 // this is a rare case, usually the "tail increment" is noticed earlier
2607 ; Peephole 243 avoided branch to sjmp
2614 // applies to f.e. simplefloat.c (saving 1 cycle)
2619 ; Peephole 244.a moving first to a instead of r%1
2624 // applies to f.e. _itoa.c (saving 1 cycle)
2629 ; Peephole 244.b moving first to a instead of r%1
2635 // applies to f.e. bug-460010.c (saving 1 cycle)
2640 ; Peephole 244.c loading dpl from a instead of r%1
2649 ; Peephole 244.d loading dph from a instead of r%1
2654 // this one is safe but disables 245.a 245.b
2655 // please remove 245 if 245.a 245.b are found to be safe
2656 // applies to f.e. scott-compare.c
2667 ; Peephole 245 optimized complement (r%1 and acc set needed?)
2672 } if labelRefCount %2 1
2674 // this one will not be triggered if 245 is present
2675 // please remove 245 if 245.a 245.b are found to be safe
2676 // applies to f.e. vprintf.c
2688 ; Peephole 245.a optimized conditional jump (r%1 and acc not set!)
2690 } if labelRefCount %2 1
2692 // this one will not be triggered if 245 is present
2693 // please remove 245 if 245.a 245.b are found to be safe
2694 // applies to f.e. scott-compare.c
2706 ; Peephole 245.b optimized conditional jump (r%1 and acc not set!)
2708 } if labelRefCount %2 1
2711 // rules 246.x apply to f.e. bitfields.c
2722 ; Peephole 246.a combined clr/clr
2739 ; Peephole 246.b combined set/set
2756 ; Peephole 246.c combined set/clr
2774 ; Peephole 246.d combined clr/set
2793 ; Peephole 246.e combined set/clr/clr
2812 ; Peephole 246.f combined set/clr/set
2832 ; Peephole 246.g combined clr/set/clr
2852 ; Peephole 246.h combined clr/set/set
2863 // rules 247.x apply to f.e. bitfields.c
2874 ; Peephole 247.a combined clr/clr
2891 ; Peephole 247.b combined set/set
2908 ; Peephole 247.c combined set/clr
2926 ; Peephole 247.d combined clr/set
2945 ; Peephole 247.e combined set/clr/clr
2964 ; Peephole 247.f combined set/clr/set
2984 ; Peephole 247.g combined clr/set/clr
3004 ; Peephole 247.h combined clr/set/set
3013 // Peepholes 248.x have to be compatible with the keyword volatile.
3014 // They optimize typical accesses to memory mapped I/O devices:
3015 // volatile xdata char t; t|=0x01;
3025 ; Peephole 248.a optimized or to xdata
3033 // volatile xdata char t; t&=0x01;
3043 ; Peephole 248.b optimized and to xdata
3051 // volatile xdata char t; t^=0x01;
3061 ; Peephole 248.c optimized xor to xdata
3069 // volatile xdata char t; t|=0x01; t&=~0x01; t|=0x01;
3089 ; Peephole 248.d optimized or/and/or to volatile xdata
3103 // volatile xdata char t; t&=~0x01; t|=0x01; t&=~0x01;
3123 ; Peephole 248.e optimized and/or/and to volatile xdata
3137 // volatile xdata char t; t|=0x01; t&=~0x01;
3151 ; Peephole 248.f optimized or/and to volatile xdata
3162 // volatile xdata char t; t&=~0x01; t|=0x01;
3176 ; Peephole 248.g optimized and/or to volatile xdata
3187 // volatile xdata char t; t^=0x01; t^=0x01;
3201 ; Peephole 248.h optimized xor/xor to volatile xdata
3212 // Peepholes 248.i to 248.m are like 248.d to 248.h except they apply to bitfields:
3213 // xdata struct { unsigned b0:1; unsigned b1:1; unsigned b2:1; } xport;
3214 // xport.b0=1; xport.b0=0; xport.b0=1;
3231 ; Peephole 248.i optimized or/and/or to xdata bitfield
3260 ; Peephole 248.j optimized and/or/and to xdata bitfield
3284 ; Peephole 248.k optimized or/and to xdata bitfield
3305 ; Peephole 248.l optimized and/or to xdata bitfield
3326 ; Peephole 248.m optimized xor/xor to xdata bitfield
3341 ; Peephole 249.a jump optimization
3342 } if labelRefCount %1 1
3348 ; Peephole 249.b jump optimization
3349 } if labelRefCount %1 1
3352 // This allows non-interrupt and interrupt code to safely compete
3353 // for a resource without the non-interrupt code having to disable
3355 // volatile bit resource_is_free;
3356 // if( resource_is_free ) {
3357 // resource_is_free=0; do_something; resource_is_free=1;
3364 ; Peephole 250.a using atomic test and clear
3368 } if labelRefCount %3 0
3376 ; Peephole 250.b using atomic test and clear
3380 } if labelRefCount %2 1
3383 // not before peephole 250.b
3387 ; Peephole 251.a replaced ljmp to ret with ret
3389 } if labelIsReturnOnly
3391 // not before peephole 250.b
3395 ; Peephole 251.b replaced sjmp to ret with ret
3397 } if labelIsReturnOnly
3399 // applies to shifts.c and when accessing arrays with an unsigned integer index
3400 // saving 1 byte, 2 cycles
3410 ; Peephole 252 optimized left shift
3419 // applies to: void test( char c ) { if( c ) func1(); else func2(); }
3424 ; Peephole 253.a replaced lcall/ret with ljmp
3428 // applies to: void test( char c ) { if( c ) func1(); else func2(); }
3434 ; Peephole 253.b replaced lcall/ret with ljmp
3436 } if labelRefCount %2 0
3438 // applies to f.e. scott-bool1.c
3444 ; Peephole 253.c replaced lcall with ljmp
3451 // applies to f.e. funptrs.c
3452 // saves one byte if %1 is a register or @register
3457 ; Peephole 254 optimized left shift
3462 // applies to f.e. switch.c
3475 ; Peephole 255 optimized jump table index calculation
3477 cjne a,#(%1+0x01),.+1
3486 // applies to f.e. jump tables and scott-bool1.c.
3487 // similar peepholes can be constructed for other instructions
3488 // after which a flag or a register is known (like: djnz, cjne, jnc)
3494 ; Peephole 256.a removed redundant clr c
3497 } if labelRefCount %2 0
3499 // applies to f.e. logf.c
3505 ; Peephole 256.b removed redundant clr a
3508 } if labelRefCount %2 0
3510 // applies to f.e. bug-905492.c
3516 ; Peephole 256.c loading %3 with zero from a
3520 } if labelRefCount %2 0
3522 // applies to f.e. malloc.c
3529 ; Peephole 256.d loading %3 with zero from a
3534 } if labelRefCount(%2 0),operandsNotRelated('a' %4)
3543 ; Peephole 256.e loading %3 with zero from a
3549 } if labelRefCount(%2 0),operandsNotRelated('a' %4 %6)
3559 ; Peephole 256.f loading %2 with zero from a
3566 } if labelRefCount(%2 0),operandsNotRelated('a' %4 %6 %8)
3569 // unsigned char i=8; do{ } while(--i != 0);
3570 // this currently only applies if i is kept in a register
3575 ; Peephole 257 optimized decrement with compare
3580 // in_byte<<=1; if(in_bit) in_byte|=1;
3581 // helps f.e. reading data on a 3-wire (SPI) bus
3591 ; Peephole 258.a optimized bitbanging
3600 // in_byte<<=1; if(in_bit) in_byte|=1;
3610 ; Peephole 258.b optimized bitbanging
3619 // in_byte>>=1; if(in_bit) in_byte|=0x80;
3630 ; Peephole 258.c optimized bitbanging
3639 // in_byte>>=1; if(in_bit) in_byte|=0x80;
3650 ; Peephole 258.d optimized bitbanging
3659 // out_bit=out_byte&0x80; out_byte<<=1;
3660 // helps f.e. writing data on a 3-wire (SPI) bus
3669 ; Peephole 258.e optimized bitbanging
3676 // out_bit=out_byte&0x01; out_byte>>=1;
3686 ; Peephole 258.f optimized bitbanging
3694 // Peepholes 259.x are not compatible with peepholes 250.x
3695 // Peepholes 250.x add jumps to a previously unused label. As the
3696 // labelRefCount is not increased, peepholes 259.x are (mistakenly) applied.
3697 // (Mail on sdcc-devel 2004-10-25)
3698 // Note: Peepholes 193..199, 251 remove jumps to previously used labels without
3699 // decreasing labelRefCount (less dangerous - this f.e. leads to 253.c being
3700 // applied instead of 253.b)
3702 // applies to f.e. vprintf.c
3709 // ; Peephole 259.a removed redundant label %2 and ret
3711 //} if labelRefCount %2 0
3713 // applies to f.e. gets.c
3720 // ; Peephole 259.b removed redundant label %2 and ret
3722 //} if labelRefCount %2 0
3724 // optimizing jumptables
3725 // Please note: to enable peephole 260.x you currently have to set
3726 // the environment variable SDCC_SJMP_JUMPTABLE
3738 ; Peephole 260.a used sjmp in jumptable
3749 // optimizing jumptables
3762 ; Peephole 260.b used sjmp in jumptable
3774 // optimizing jumptables
3788 ; Peephole 260.c used sjmp in jumptable
3801 // optimizing jumptables
3816 ; Peephole 260.d used sjmp in jumptable
3830 // optimizing jumptables
3846 ; Peephole 260.e used sjmp in jumptable
3861 // optimizing jumptables
3879 ; Peephole 260.f used sjmp in jumptable
3896 // optimizing jumptables
3915 ; Peephole 260.g used sjmp in jumptable
3933 // optimizing jumptables
3953 ; Peephole 260.h used sjmp in jumptable
3972 // optimizing jumptables
3993 ; Peephole 260.i used sjmp in jumptable
4013 // optimizing jumptables
4035 ; Peephole 260.j used sjmp in jumptable
4056 // optimizing jumptables
4079 ; Peephole 260.k used sjmp in jumptable
4101 // optimizing jumptables
4125 ; Peephole 260.l used sjmp in jumptable
4148 // optimizing jumptables
4173 ; Peephole 260.m used sjmp in jumptable
4197 // applies to: a = (a << 1) | (a >> 15);
4209 ; Peephole 261.a optimized left rol
4219 // applies to: a = (a << 15) | (a >> 1);
4231 ; Peephole 261.b optimized right rol