root/cebix/BasiliskII/src/uae_cpu/compiler/compemu_support.cpp

Comparing BasiliskII/src/uae_cpu/compiler/compemu_support.cpp (file contents):
Revision 1.4 by gbeauche, 2002-09-18T11:41:56Z vs.
Revision 1.18 by gbeauche, 2003-03-21T19:12:44Z

# Line 1 | Line 1
1 + /*
2 + *  compiler/compemu_support.cpp - Core dynamic translation engine
3 + *
4 + *  Original 68040 JIT compiler for UAE, copyright 2000-2002 Bernd Meyer
5 + *
6 + *  Adaptation for Basilisk II and improvements, copyright 2000-2002
7 + *    Gwenole Beauchesne
8 + *
9 + *  Basilisk II (C) 1997-2002 Christian Bauer
10 + *  
11 + *  This program is free software; you can redistribute it and/or modify
12 + *  it under the terms of the GNU General Public License as published by
13 + *  the Free Software Foundation; either version 2 of the License, or
14 + *  (at your option) any later version.
15 + *
16 + *  This program is distributed in the hope that it will be useful,
17 + *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18 + *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19 + *  GNU General Public License for more details.
20 + *
21 + *  You should have received a copy of the GNU General Public License
22 + *  along with this program; if not, write to the Free Software
23 + *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
24 + */
25 +
26   #if !REAL_ADDRESSING && !DIRECT_ADDRESSING
27   #error "Only Real or Direct Addressing is supported with the JIT Compiler"
28   #endif
# Line 44 | Line 69
69   #endif
70  
71   #ifndef WIN32
72 < #define PROFILE_COMPILE_TIME    1
72 > #define PROFILE_COMPILE_TIME            1
73 > #define PROFILE_UNTRANSLATED_INSNS      1
74   #endif
75  
76   #ifdef WIN32
# Line 69 | Line 95 | static clock_t emul_start_time = 0;
95   static clock_t emul_end_time    = 0;
96   #endif
97  
98 + #if PROFILE_UNTRANSLATED_INSNS
99 + const int untranslated_top_ten = 20;
100 + static uae_u32 raw_cputbl_count[65536] = { 0, };
101 + static uae_u16 opcode_nums[65536];
102 +
103 + static int untranslated_compfn(const void *e1, const void *e2)
104 + {
105 +        return raw_cputbl_count[*(const uae_u16 *)e1] < raw_cputbl_count[*(const uae_u16 *)e2];
106 + }
107 + #endif
108 +
109   compop_func *compfunctbl[65536];
110   compop_func *nfcompfunctbl[65536];
111   cpuop_func *nfcpufunctbl[65536];
112   uae_u8* comp_pc_p;
113  
114 + // From newcpu.cpp
115 + extern bool quit_program;
116 +
117   // gb-- Extra data for Basilisk II/JIT
118   #if JIT_DEBUG
119   static bool             JITDebug                        = false;        // Enable runtime disassemblers through mon?
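
A note on the profiling counters added above: raw_cputbl_count[] is indexed by plain (m68k-order) opcode and bumped from generated code whenever an untranslated handler is called; compiler_exit() later sorts opcode_nums[] by count to print a ranking (despite its name, untranslated_top_ten reports the top 20 entries). The comparator returns the boolean raw_cputbl_count[e1] < raw_cputbl_count[e2] rather than the negative/zero/positive value qsort() expects, so the ordering is unspecified in principle, even if it tends to come out roughly descending in practice. A strictly conforming three-way comparator would look like this sketch:

    /* Sketch only, not part of the revision: a qsort() comparator that
       sorts opcodes by descending execution count. */
    static int untranslated_compfn_strict(const void *e1, const void *e2)
    {
        uae_u32 c1 = raw_cputbl_count[*(const uae_u16 *)e1];
        uae_u32 c2 = raw_cputbl_count[*(const uae_u16 *)e2];
        return (c1 < c2) - (c1 > c2);   /* -1, 0 or +1; larger counts first */
    }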
# Line 88 | Line 128 | static bool            lazy_flush                      = true;         // Fl
128   static bool             avoid_fpu                       = true;         // Flag: compile FPU instructions ?
129   static bool             have_cmov                       = false;        // target has CMOV instructions ?
130   static bool             have_rat_stall          = true;         // target has partial register stalls ?
131 + const bool              tune_alignment          = true;         // Tune code alignments for running CPU ?
132 + const bool              tune_nop_fillers        = true;         // Tune no-op fillers for architecture
133 + static bool             setzflg_uses_bsf        = false;        // setzflg virtual instruction can use native BSF instruction correctly?
134 + static int              align_loops                     = 32;           // Align the start of loops
135 + static int              align_jumps                     = 32;           // Align the start of jumps
136   static int              zero_fd                         = -1;
137   static int              optcount[10]            = {
138          10,             // How often a block has to be executed before it is translated
# Line 104 | Line 149 | struct op_properties {
149   };
150   static op_properties prop[65536];
151  
107 // gb-- Control Flow Predicates
108
152   static inline int end_block(uae_u32 opcode)
153   {
154          return (prop[opcode].cflow & fl_end_block);
155   }
156  
157 + static inline bool is_const_jump(uae_u32 opcode)
158 + {
159 +        return (prop[opcode].cflow == fl_const_jump);
160 + }
161 +
162   static inline bool may_trap(uae_u32 opcode)
163   {
164          return (prop[opcode].cflow & fl_trap);
165   }
166  
167 + static inline unsigned int cft_map (unsigned int f)
168 + {
169 + #ifndef HAVE_GET_WORD_UNSWAPPED
170 +    return f;
171 + #else
172 +    return ((f >> 8) & 255) | ((f & 255) << 8);
173 + #endif
174 + }
175 +
176   uae_u8* start_pc_p;
177   uae_u32 start_pc;
178   uae_u32 current_block_pc_p;
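
cft_map() bridges the two opcode representations: on hosts where HAVE_GET_WORD_UNSWAPPED is defined, 68k instruction words are fetched byte-swapped, so all opcode tables are indexed with the swapped value. A self-contained sketch of the mapping (the opcode value is just an illustration):

    #include <cassert>

    // Mirrors the HAVE_GET_WORD_UNSWAPPED branch of cft_map() above.
    static unsigned int swap16(unsigned int f)
    {
        return ((f >> 8) & 255) | ((f & 255) << 8);
    }

    int main()
    {
        // RTS (0x4E75) as fetched on a byte-swapped host:
        assert(swap16(0x4E75) == 0x754E);
        // The swap is an involution, so the same function maps back.
        assert(swap16(0x754E) == 0x4E75);
        return 0;
    }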
# Line 143 | Line 200 | static void* popall_cache_miss=NULL;
200   static void* popall_recompile_block=NULL;
201   static void* popall_check_checksum=NULL;
202  
146 extern uae_u32 oink;
147 extern unsigned long foink3;
148 extern unsigned long foink;
149
203   /* The 68k only ever executes from even addresses. So right now, we
204   * waste half the entries in this array
205   * UPDATE: We now use those entries to store the start of the linked
# Line 491 | Line 544 | static void prepare_block(blockinfo* bi)
544     compiled. If the list of free blockinfos is empty, we allocate a new
545     pool of blockinfos and link the newly created blockinfos together
546     into the list of free blockinfos. Otherwise, we simply pop a structure
547 <   of the free list.
547 >   off the free list.
548  
549     Blockinfos are lazily deallocated, i.e. chained together in the
550     list of free blockinfos whenever a translation cache flush (hard or
551     soft) request occurs.
552   */
553  
554 < #if USE_SEPARATE_BIA
555 < const int BLOCKINFO_POOL_SIZE = 128;
556 < struct blockinfo_pool {
557 <        blockinfo bi[BLOCKINFO_POOL_SIZE];
558 <        blockinfo_pool *next;
554 > template< class T >
555 > class LazyBlockAllocator
556 > {
557 >        enum {
558 >                kPoolSize = 1 + 4096 / sizeof(T)
559 >        };
560 >        struct Pool {
561 >                T chunk[kPoolSize];
562 >                Pool * next;
563 >        };
564 >        Pool * mPools;
565 >        T * mChunks;
566 > public:
567 >        LazyBlockAllocator() : mPools(0), mChunks(0) { }
568 >        ~LazyBlockAllocator();
569 >        T * acquire();
570 >        void release(T * const);
571   };
507 static blockinfo_pool * blockinfo_pools = 0;
508 static blockinfo *              free_blockinfos = 0;
509 #endif
572  
573 < static __inline__ blockinfo *alloc_blockinfo(void)
573 > template< class T >
574 > LazyBlockAllocator<T>::~LazyBlockAllocator()
575   {
576 < #if USE_SEPARATE_BIA
577 <        if (!free_blockinfos) {
578 <                // There is no blockinfo struct left, allocate a new
579 <                // pool and link the chunks into the free list
580 <                blockinfo_pool *bi_pool = (blockinfo_pool *)malloc(sizeof(blockinfo_pool));
581 <                for (blockinfo *bi = &bi_pool->bi[0]; bi < &bi_pool->bi[BLOCKINFO_POOL_SIZE]; bi++) {
582 <                        bi->next = free_blockinfos;
583 <                        free_blockinfos = bi;
576 >        Pool * currentPool = mPools;
577 >        while (currentPool) {
578 >                Pool * deadPool = currentPool;
579 >                currentPool = currentPool->next;
580 >                free(deadPool);
581 >        }
582 > }
583 >
584 > template< class T >
585 > T * LazyBlockAllocator<T>::acquire()
586 > {
587 >        if (!mChunks) {
588 >                // There is no chunk left, allocate a new pool and link the
589 >                // chunks into the free list
590 >                Pool * newPool = (Pool *)malloc(sizeof(Pool));
591 >                for (T * chunk = &newPool->chunk[0]; chunk < &newPool->chunk[kPoolSize]; chunk++) {
592 >                        chunk->next = mChunks;
593 >                        mChunks = chunk;
594                  }
595 <                bi_pool->next = blockinfo_pools;
596 <                blockinfo_pools = bi_pool;
595 >                newPool->next = mPools;
596 >                mPools = newPool;
597          }
598 <        blockinfo *bi = free_blockinfos;
599 <        free_blockinfos = bi->next;
600 < #else
528 <        blockinfo *bi = (blockinfo*)current_compile_p;
529 <        current_compile_p += sizeof(blockinfo);
530 < #endif
531 <        return bi;
598 >        T * chunk = mChunks;
599 >        mChunks = chunk->next;
600 >        return chunk;
601   }
602  
603 < static __inline__ void free_blockinfo(blockinfo *bi)
603 > template< class T >
604 > void LazyBlockAllocator<T>::release(T * const chunk)
605 > {
606 >        chunk->next = mChunks;
607 >        mChunks = chunk;
608 > }
609 >
610 > template< class T >
611 > class HardBlockAllocator
612   {
613 + public:
614 +        T * acquire() {
615 +                T * data = (T *)current_compile_p;
616 +                current_compile_p += sizeof(T);
617 +                return data;
618 +        }
619 +
620 +        void release(T * const chunk) {
621 +                // Deallocated on invalidation
622 +        }
623 + };
624 +
625   #if USE_SEPARATE_BIA
626 <        bi->next = free_blockinfos;
627 <        free_blockinfos = bi;
626 > static LazyBlockAllocator<blockinfo> BlockInfoAllocator;
627 > static LazyBlockAllocator<checksum_info> ChecksumInfoAllocator;
628 > #else
629 > static HardBlockAllocator<blockinfo> BlockInfoAllocator;
630 > static HardBlockAllocator<checksum_info> ChecksumInfoAllocator;
631   #endif
632 +
633 + static __inline__ checksum_info *alloc_checksum_info(void)
634 + {
635 +        checksum_info *csi = ChecksumInfoAllocator.acquire();
636 +        csi->next = NULL;
637 +        return csi;
638   }
639  
640 < static void free_blockinfo_pools(void)
640 > static __inline__ void free_checksum_info(checksum_info *csi)
641   {
642 < #if USE_SEPARATE_BIA
643 <        int blockinfo_pool_count = 0;
644 <        blockinfo_pool *curr_pool = blockinfo_pools;
645 <        while (curr_pool) {
646 <                blockinfo_pool_count++;
647 <                blockinfo_pool *dead_pool = curr_pool;
648 <                curr_pool = curr_pool->next;
649 <                free(dead_pool);
642 >        csi->next = NULL;
643 >        ChecksumInfoAllocator.release(csi);
644 > }
645 >
646 > static __inline__ void free_checksum_info_chain(checksum_info *csi)
647 > {
648 >        while (csi != NULL) {
649 >                checksum_info *csi2 = csi->next;
650 >                free_checksum_info(csi);
651 >                csi = csi2;
652          }
653 <        
654 <        uae_u32 blockinfo_pools_size = blockinfo_pool_count * BLOCKINFO_POOL_SIZE * sizeof(blockinfo);
655 <        write_log("### Blockinfo allocation statistics\n");
656 <        write_log("Number of blockinfo pools  : %d\n", blockinfo_pool_count);
657 <        write_log("Total number of blockinfos : %d (%d KB)\n",
658 <                          blockinfo_pool_count * BLOCKINFO_POOL_SIZE,
659 <                          blockinfo_pools_size / 1024);
560 <        write_log("\n");
653 > }
654 >
655 > static __inline__ blockinfo *alloc_blockinfo(void)
656 > {
657 >        blockinfo *bi = BlockInfoAllocator.acquire();
658 > #if USE_CHECKSUM_INFO
659 >        bi->csi = NULL;
660   #endif
661 +        return bi;
662 + }
663 +
664 + static __inline__ void free_blockinfo(blockinfo *bi)
665 + {
666 + #if USE_CHECKSUM_INFO
667 +        free_checksum_info_chain(bi->csi);
668 +        bi->csi = NULL;
669 + #endif
670 +        BlockInfoAllocator.release(bi);
671   }
672  
673   static __inline__ void alloc_blockinfos(void)
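
The rework above replaces the fixed-size blockinfo pool with two allocation policies behind a common acquire()/release() interface: LazyBlockAllocator carves roughly page-sized pools with malloc() and threads released chunks onto a free list (the only requirement on T is a usable `next` pointer), while HardBlockAllocator bump-allocates straight out of the translation cache and reclaims nothing until the cache is invalidated. A minimal usage sketch, assuming a hypothetical node type with the required field:

    // Hypothetical element type; LazyBlockAllocator only requires that
    // T have a member `T *next` for threading the free list.
    struct node {
        node *next;
        int payload;
    };

    static LazyBlockAllocator<node> NodeAllocator;

    void demo(void)
    {
        node *n = NodeAllocator.acquire(); // pops a chunk; mallocs and
                                           // links a fresh pool if the
                                           // free list is empty
        n->payload = 42;
        NodeAllocator.release(n);          // pushes the chunk back; the
                                           // pools themselves are freed
                                           // only in the destructor
    }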
# Line 601 | Line 710 | static __inline__ void emit_long(uae_u32
710      target+=4;
711   }
712  
713 + static __inline__ void emit_block(const uae_u8 *block, uae_u32 blocklen)
714 + {
715 +        memcpy((uae_u8 *)target,block,blocklen);
716 +        target+=blocklen;
717 + }
718 +
719   static __inline__ uae_u32 reverse32(uae_u32 v)
720   {
721   #if 1
# Line 697 | Line 812 | static __inline__ void flush_flags(void)
812   int touchcnt;
813  
814   /********************************************************************
815 + * Partial register flushing for optimized calls                    *
816 + ********************************************************************/
817 +
818 + struct regusage {
819 +        uae_u16 rmask;
820 +        uae_u16 wmask;
821 + };
822 +
823 + static inline void ru_set(uae_u16 *mask, int reg)
824 + {
825 + #if USE_OPTIMIZED_CALLS
826 +        *mask |= 1 << reg;
827 + #endif
828 + }
829 +
830 + static inline bool ru_get(const uae_u16 *mask, int reg)
831 + {
832 + #if USE_OPTIMIZED_CALLS
833 +        return (*mask & (1 << reg));
834 + #else
835 +        /* Default: instruction reads & writes to register */
836 +        return true;
837 + #endif
838 + }
839 +
840 + static inline void ru_set_read(regusage *ru, int reg)
841 + {
842 +        ru_set(&ru->rmask, reg);
843 + }
844 +
845 + static inline void ru_set_write(regusage *ru, int reg)
846 + {
847 +        ru_set(&ru->wmask, reg);
848 + }
849 +
850 + static inline bool ru_read_p(const regusage *ru, int reg)
851 + {
852 +        return ru_get(&ru->rmask, reg);
853 + }
854 +
855 + static inline bool ru_write_p(const regusage *ru, int reg)
856 + {
857 +        return ru_get(&ru->wmask, reg);
858 + }
859 +
860 + static void ru_fill_ea(regusage *ru, int reg, amodes mode,
861 +                                           wordsizes size, int write_mode)
862 + {
863 +        switch (mode) {
864 +        case Areg:
865 +                reg += 8;
866 +                /* fall through */
867 +        case Dreg:
868 +                ru_set(write_mode ? &ru->wmask : &ru->rmask, reg);
869 +                break;
870 +        case Ad16:
871 +                /* skip displacement */
872 +                m68k_pc_offset += 2;
873 +        case Aind:
874 +        case Aipi:
875 +        case Apdi:
876 +                ru_set_read(ru, reg+8);
877 +                break;
878 +        case Ad8r:
879 +                ru_set_read(ru, reg+8);
880 +                /* fall through */
881 +        case PC8r: {
882 +                uae_u16 dp = comp_get_iword((m68k_pc_offset+=2)-2);
883 +                reg = (dp >> 12) & 15;
884 +                ru_set_read(ru, reg);
885 +                if (dp & 0x100)
886 +                        m68k_pc_offset += (((dp & 0x30) >> 3) & 7) + ((dp & 3) * 2);
887 +                break;
888 +        }
889 +        case PC16:
890 +        case absw:
891 +        case imm0:
892 +        case imm1:
893 +                m68k_pc_offset += 2;
894 +                break;
895 +        case absl:
896 +        case imm2:
897 +                m68k_pc_offset += 4;
898 +                break;
899 +        case immi:
900 +                m68k_pc_offset += (size == sz_long) ? 4 : 2;
901 +                break;
902 +        }
903 + }
904 +
905 + /* TODO: split into a static initialization part and a dynamic one
906 +   (instructions depending on extension words) */
907 + static void ru_fill(regusage *ru, uae_u32 opcode)
908 + {
909 +        m68k_pc_offset += 2;
910 +
911 +        /* Default: no register is used or written to */
912 +        ru->rmask = 0;
913 +        ru->wmask = 0;
914 +
915 +        uae_u32 real_opcode = cft_map(opcode);
916 +        struct instr *dp = &table68k[real_opcode];
917 +
918 +        bool rw_dest = true;
919 +        bool handled = false;
920 +
921 +        /* Handle some instructions specifically */
922 +        uae_u16 reg, ext;
923 +        switch (dp->mnemo) {
924 +        case i_BFCHG:
925 +        case i_BFCLR:
926 +        case i_BFEXTS:
927 +        case i_BFEXTU:
928 +        case i_BFFFO:
929 +        case i_BFINS:
930 +        case i_BFSET:
931 +        case i_BFTST:
932 +                ext = comp_get_iword((m68k_pc_offset+=2)-2);
933 +                if (ext & 0x800) ru_set_read(ru, (ext >> 6) & 7);
934 +                if (ext & 0x020) ru_set_read(ru, ext & 7);
935 +                ru_fill_ea(ru, dp->dreg, (amodes)dp->dmode, (wordsizes)dp->size, 1);
936 +                if (dp->dmode == Dreg)
937 +                        ru_set_read(ru, dp->dreg);
938 +                switch (dp->mnemo) {
939 +                case i_BFEXTS:
940 +                case i_BFEXTU:
941 +                case i_BFFFO:
942 +                        ru_set_write(ru, (ext >> 12) & 7);
943 +                        break;
944 +                case i_BFINS:
945 +                        ru_set_read(ru, (ext >> 12) & 7);
946 +                        /* fall through */
947 +                case i_BFCHG:
948 +                case i_BFCLR:
949 +                case i_BSET:
950 +                        if (dp->dmode == Dreg)
951 +                                ru_set_write(ru, dp->dreg);
952 +                        break;
953 +                }
954 +                handled = true;
955 +                rw_dest = false;
956 +                break;
957 +
958 +        case i_BTST:
959 +                rw_dest = false;
960 +                break;
961 +
962 +        case i_CAS:
963 +        {
964 +                ext = comp_get_iword((m68k_pc_offset+=2)-2);
965 +                int Du = ext & 7;
966 +                ru_set_read(ru, Du);
967 +                int Dc = (ext >> 6) & 7;
968 +                ru_set_read(ru, Dc);
969 +                ru_set_write(ru, Dc);
970 +                break;
971 +        }
972 +        case i_CAS2:
973 +        {
974 +                int Dc1, Dc2, Du1, Du2, Rn1, Rn2;
975 +                ext = comp_get_iword((m68k_pc_offset+=2)-2);
976 +                Rn1 = (ext >> 12) & 15;
977 +                Du1 = (ext >> 6) & 7;
978 +                Dc1 = ext & 7;
979 +                ru_set_read(ru, Rn1);
980 +                ru_set_read(ru, Du1);
981 +                ru_set_read(ru, Dc1);
982 +                ru_set_write(ru, Dc1);
983 +                ext = comp_get_iword((m68k_pc_offset+=2)-2);
984 +                Rn2 = (ext >> 12) & 15;
985 +                Du2 = (ext >> 6) & 7;
986 +                Dc2 = ext & 7;
987 +                ru_set_read(ru, Rn2);
988 +                ru_set_read(ru, Du2);
989 +                ru_set_write(ru, Dc2);
990 +                break;
991 +        }
992 +        case i_DIVL: case i_MULL:
993 +                m68k_pc_offset += 2;
994 +                break;
995 +        case i_LEA:
996 +        case i_MOVE: case i_MOVEA: case i_MOVE16:
997 +                rw_dest = false;
998 +                break;
999 +        case i_PACK: case i_UNPK:
1000 +                rw_dest = false;
1001 +                m68k_pc_offset += 2;
1002 +                break;
1003 +        case i_TRAPcc:
1004 +                m68k_pc_offset += (dp->size == sz_long) ? 4 : 2;
1005 +                break;
1006 +        case i_RTR:
1007 +                /* do nothing, just for coverage debugging */
1008 +                break;
1009 +        /* TODO: handle EXG instruction */
1010 +        }
1011 +
1012 +        /* Handle A-Traps better */
1013 +        if ((real_opcode & 0xf000) == 0xa000) {
1014 +                handled = true;
1015 +        }
1016 +
1017 +        /* Handle EmulOps better */
1018 +        if ((real_opcode & 0xff00) == 0x7100) {
1019 +                handled = true;
1020 +                ru->rmask = 0xffff;
1021 +                ru->wmask = 0;
1022 +        }
1023 +
1024 +        if (dp->suse && !handled)
1025 +                ru_fill_ea(ru, dp->sreg, (amodes)dp->smode, (wordsizes)dp->size, 0);
1026 +
1027 +        if (dp->duse && !handled)
1028 +                ru_fill_ea(ru, dp->dreg, (amodes)dp->dmode, (wordsizes)dp->size, 1);
1029 +
1030 +        if (rw_dest)
1031 +                ru->rmask |= ru->wmask;
1032 +
1033 +        handled = handled || dp->suse || dp->duse;
1034 +
1035 +        /* Mark all registers as used/written if the instruction may trap */
1036 +        if (may_trap(opcode)) {
1037 +                handled = true;
1038 +                ru->rmask = 0xffff;
1039 +                ru->wmask = 0xffff;
1040 +        }
1041 +
1042 +        if (!handled) {
1043 +                write_log("ru_fill: %04x = { %04x, %04x }\n",
1044 +                                  real_opcode, ru->rmask, ru->wmask);
1045 +                abort();
1046 +        }
1047 + }
1048 +
1049 + /********************************************************************
1050   * register allocation per block logging                            *
1051   ********************************************************************/
1052  
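
The register-usage pass above computes, for each 68k instruction, a 16-bit read mask and a 16-bit write mask over the integer register file (bits 0-7 for D0-D7, bits 8-15 for A0-A7), conservatively marking every register as used when the instruction may trap or cannot be analyzed. A hedged sketch of how a caller could consume the masks when deciding which cached registers need spilling around an optimized call (the function name is illustrative, not from the source):

    // Sketch: walk all 16 integer registers and query the masks filled
    // in by ru_fill().  A register that is neither read nor written by
    // the instruction does not need to be flushed around the call.
    static void classify_regs_example(const regusage *ru)
    {
        for (int reg = 0; reg < 16; reg++) {   // 0-7 = Dn, 8-15 = An
            bool r = ru_read_p(ru, reg);
            bool w = ru_write_p(ru, reg);
            if (!r && !w) {
                /* untouched: cached native copy can stay live */
            } else if (w && !r) {
                /* write-only: old value need not be synced back */
            }
        }
    }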
# Line 2566 | Line 2916 | MIDFUNC(3,cmov_l_rm,(RW4 d, IMM s, IMM c
2916   }
2917   MENDFUNC(3,cmov_l_rm,(RW4 d, IMM s, IMM cc))
2918  
2919 < MIDFUNC(2,bsf_l_rr,(W4 d, R4 s))
2919 > MIDFUNC(1,setzflg_l,(RW4 r))
2920   {
2921 <    CLOBBER_BSF;
2922 <    s=readreg(s,4);
2923 <    d=writereg(d,4);
2924 <    raw_bsf_l_rr(d,s);
2925 <    unlock2(s);
2926 <    unlock2(d);
2921 >        if (setzflg_uses_bsf) {
2922 >                CLOBBER_BSF;
2923 >                r=rmw(r,4,4);
2924 >                raw_bsf_l_rr(r,r);
2925 >                unlock2(r);
2926 >        }
2927 >        else {
2928 >                Dif (live.flags_in_flags!=VALID) {
2929 >                        write_log("setzflg() wanted flags in native flags, they are %d\n",
2930 >                                          live.flags_in_flags);
2931 >                        abort();
2932 >                }
2933 >                r=readreg(r,4);
2934 >                int f=writereg(S11,4);
2935 >                int t=writereg(S12,4);
2936 >                raw_flags_set_zero(f,r,t);
2937 >                unlock2(f);
2938 >                unlock2(r);
2939 >                unlock2(t);
2940 >        }
2941   }
2942 < MENDFUNC(2,bsf_l_rr,(W4 d, R4 s))
2942 > MENDFUNC(1,setzflg_l,(RW4 r))
2943  
2944   MIDFUNC(2,imul_32_32,(RW4 d, R4 s))
2945   {
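
The rewritten setzflg virtual instruction exploits a property of x86 BSF: it sets ZF exactly when its source operand is zero. Whether BSF leaves the remaining arithmetic flags untouched is not architecturally guaranteed, which is why the fast path is gated on the runtime target_check_bsf() probe, with raw_flags_set_zero() as the fallback. A hedged GCC inline-assembly sketch of the BSF trick in isolation (illustrative only, not the emitted code):

    // ZF = (r == 0) after BSF, captured here with SETZ.  On CPUs where
    // BSF clobbers the other flags, this cannot be used to patch only
    // the Z bit of the lazily-held flag state -- hence the probe.
    static inline int zflag_via_bsf(uae_u32 r)
    {
        unsigned char z;
        uae_u32 dummy;
        __asm__("bsfl %2,%1; setz %0"
                : "=q" (z), "=r" (dummy)
                : "r" (r)
                : "cc");
        return z;
    }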
# Line 4513 | Line 4877 | static inline const char *str_on_off(boo
4877          return b ? "on" : "off";
4878   }
4879  
4516 static __inline__ unsigned int cft_map (unsigned int f)
4517 {
4518 #ifndef HAVE_GET_WORD_UNSWAPPED
4519    return f;
4520 #else
4521    return ((f >> 8) & 255) | ((f & 255) << 8);
4522 #endif
4523 }
4524
4880   void compiler_init(void)
4881   {
4882          static bool initialized = false;
# Line 4560 | Line 4915 | void compiler_init(void)
4915          
4916          // Initialize target CPU (check for features, e.g. CMOV, rat stalls)
4917          raw_init_cpu();
4918 +        setzflg_uses_bsf = target_check_bsf();
4919          write_log("<JIT compiler> : target processor has CMOV instructions : %s\n", have_cmov ? "yes" : "no");
4920          write_log("<JIT compiler> : target processor can suffer from partial register stalls : %s\n", have_rat_stall ? "yes" : "no");
4921 +        write_log("<JIT compiler> : alignment for loops, jumps are %d, %d\n", align_loops, align_jumps);
4922          
4923          // Translation cache flush mechanism
4924          lazy_flush = PrefsFindBool("jitlazyflush");
# Line 4572 | Line 4929 | void compiler_init(void)
4929          write_log("<JIT compiler> : register aliasing : %s\n", str_on_off(1));
4930          write_log("<JIT compiler> : FP register aliasing : %s\n", str_on_off(USE_F_ALIAS));
4931          write_log("<JIT compiler> : lazy constant offsetting : %s\n", str_on_off(USE_OFFSET));
4932 +        write_log("<JIT compiler> : block inlining : %s\n", str_on_off(USE_INLINING));
4933          write_log("<JIT compiler> : separate blockinfo allocation : %s\n", str_on_off(USE_SEPARATE_BIA));
4934          
4935          // Build compiler tables
# Line 4579 | Line 4937 | void compiler_init(void)
4937          
4938          initialized = true;
4939          
4940 + #if PROFILE_UNTRANSLATED_INSNS
4941 +        write_log("<JIT compiler> : gather statistics on untranslated insns count\n");
4942 + #endif
4943 +
4944   #if PROFILE_COMPILE_TIME
4945          write_log("<JIT compiler> : gather statistics on translation time\n");
4946          emul_start_time = clock();
# Line 4597 | Line 4959 | void compiler_exit(void)
4959                  compiled_code = 0;
4960          }
4961          
4600        // Deallocate blockinfo pools
4601        free_blockinfo_pools();
4602        
4962   #ifndef WIN32
4963          // Close /dev/zero
4964          if (zero_fd > 0)
# Line 4615 | Line 4974 | void compiler_exit(void)
4974                  100.0*double(compile_time)/double(emul_time));
4975          write_log("\n");
4976   #endif
4977 +
4978 + #if PROFILE_UNTRANSLATED_INSNS
4979 +        uae_u64 untranslated_count = 0;
4980 +        for (int i = 0; i < 65536; i++) {
4981 +                opcode_nums[i] = i;
4982 +                untranslated_count += raw_cputbl_count[i];
4983 +        }
4984 +        write_log("Sorting out untranslated instructions count...\n");
4985 +        qsort(opcode_nums, 65536, sizeof(uae_u16), untranslated_compfn);
4986 +        write_log("\nRank  Opc      Count Name\n");
4987 +        for (int i = 0; i < untranslated_top_ten; i++) {
4988 +                uae_u32 count = raw_cputbl_count[opcode_nums[i]];
4989 +                struct instr *dp;
4990 +                struct mnemolookup *lookup;
4991 +                if (!count)
4992 +                        break;
4993 +                dp = table68k + opcode_nums[i];
4994 +                for (lookup = lookuptab; lookup->mnemo != dp->mnemo; lookup++)
4995 +                        ;
4996 +                write_log("%03d: %04x %10lu %s\n", i, opcode_nums[i], count, lookup->name);
4997 +        }
4998 + #endif
4999   }
5000  
5001   bool compiler_use_jit(void)
# Line 4843 | Line 5224 | void freescratch(void)
5224  
5225   static void align_target(uae_u32 a)
5226   {
5227 <    /* Fill with NOPs --- makes debugging with gdb easier */
5228 <    while ((uae_u32)target&(a-1))
5229 <        *target++=0x90;
5227 >        if (!a)
5228 >                return;
5229 >
5230 >        if (tune_nop_fillers)
5231 >                raw_emit_nop_filler(a - (((uae_u32)target) & (a - 1)));
5232 >        else {
5233 >                /* Fill with NOPs --- makes debugging with gdb easier */
5234 >                while ((uae_u32)target&(a-1))
5235 >                        *target++=0x90;
5236 >        }
5237   }
5238  
5239   static __inline__ int isinrom(uintptr addr)
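
align_target() now delegates to raw_emit_nop_filler() (backend-provided) when tune_nop_fillers is set, padding with a few multi-byte no-ops instead of a run of single-byte 0x90s; wide no-ops decode in fewer slots on most x86 cores. For illustration, the standard multi-byte x86 no-op encodings such a filler can draw from (the encodings are architectural; the actual selection logic lives in the backend):

    // Classic x86 no-op encodings of increasing length.
    static const uae_u8 nop1[] = { 0x90 };                    // nop
    static const uae_u8 nop2[] = { 0x66, 0x90 };              // 16-bit nop
    static const uae_u8 nop3[] = { 0x8d, 0x76, 0x00 };        // lea 0(%esi),%esi
    static const uae_u8 nop4[] = { 0x8d, 0x74, 0x26, 0x00 };  // lea 0(%esi,1),%esi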
# Line 5170 | Line 5558 | void alloc_cache(void)
5558  
5559  
5560  
5561 < extern cpuop_rettype op_illg_1 (uae_u32 opcode) REGPARAM;
5561 > extern void op_illg_1 (uae_u32 opcode) REGPARAM;
5562  
5563   static void calc_checksum(blockinfo* bi, uae_u32* c1, uae_u32* c2)
5564   {
5565 <    uae_u32 k1=0;
5566 <    uae_u32 k2=0;
5179 <    uae_s32 len=bi->len;
5180 <    uae_u32 tmp=bi->min_pcp;
5181 <    uae_u32* pos;
5565 >    uae_u32 k1 = 0;
5566 >    uae_u32 k2 = 0;
5567  
5568 <    len+=(tmp&3);
5569 <    tmp&=(~3);
5570 <    pos=(uae_u32*)tmp;
5568 > #if USE_CHECKSUM_INFO
5569 >    checksum_info *csi = bi->csi;
5570 >        Dif(!csi) abort();
5571 >        while (csi) {
5572 >                uae_s32 len = csi->length;
5573 >                uae_u32 tmp = (uae_u32)csi->start_p;
5574 > #else
5575 >                uae_s32 len = bi->len;
5576 >                uae_u32 tmp = (uae_u32)bi->min_pcp;
5577 > #endif
5578 >                uae_u32*pos;
5579  
5580 <    if (len<0 || len>MAX_CHECKSUM_LEN) {
5581 <        *c1=0;
5582 <        *c2=0;
5583 <    }
5584 <    else {
5585 <        while (len>0) {
5586 <            k1+=*pos;
5587 <            k2^=*pos;
5588 <            pos++;
5589 <            len-=4;
5580 >                len += (tmp & 3);
5581 >                tmp &= ~3;
5582 >                pos = (uae_u32 *)tmp;
5583 >
5584 >                if (len >= 0 && len <= MAX_CHECKSUM_LEN) {
5585 >                        while (len > 0) {
5586 >                                k1 += *pos;
5587 >                                k2 ^= *pos;
5588 >                                pos++;
5589 >                                len -= 4;
5590 >                        }
5591 >                }
5592 >
5593 > #if USE_CHECKSUM_INFO
5594 >                csi = csi->next;
5595          }
5596 <        *c1=k1;
5597 <        *c2=k2;
5598 <    }
5596 > #endif
5597 >
5598 >        *c1 = k1;
5599 >        *c2 = k2;
5600   }
5601  
5602 < static void show_checksum(blockinfo* bi)
5602 > #if 0
5603 > static void show_checksum(CSI_TYPE* csi)
5604   {
5605      uae_u32 k1=0;
5606      uae_u32 k2=0;
5607 <    uae_s32 len=bi->len;
5608 <    uae_u32 tmp=(uae_u32)bi->pc_p;
5607 >    uae_s32 len=CSI_LENGTH(csi);
5608 >    uae_u32 tmp=(uae_u32)CSI_START_P(csi);
5609      uae_u32* pos;
5610  
5611      len+=(tmp&3);
# Line 5224 | Line 5624 | static void show_checksum(blockinfo* bi)
5624          write_log(" bla\n");
5625      }
5626   }
5627 + #endif
5628  
5629  
5630   int check_for_cache_miss(void)
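
calc_checksum() now folds every checksum_info range attached to the block (or, without USE_CHECKSUM_INFO, the single [min_pcp, min_pcp+len) range as before) into a pair of accumulators: an additive sum k1 and an XOR k2 over aligned 32-bit words. A self-contained sketch of the folding over one range:

    #include <stdint.h>

    // Standalone version of the k1/k2 folding over one word-aligned
    // range; calc_checksum() accumulates this across all checksum_info
    // ranges of a block and compares the result against the values
    // recorded at translation time.
    static void checksum_range(const uint32_t *pos, int32_t len,
                               uint32_t *c1, uint32_t *c2)
    {
        uint32_t k1 = 0, k2 = 0;
        while (len > 0) {       // len is in bytes
            k1 += *pos;
            k2 ^= *pos;
            pos++;
            len -= 4;
        }
        *c1 = k1;
        *c2 = k2;
    }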
# Line 5277 | Line 5678 | static int called_check_checksum(blockin
5678   static inline int block_check_checksum(blockinfo* bi)
5679   {
5680      uae_u32     c1,c2;
5681 <    int         isgood;
5681 >    bool        isgood;
5682      
5683      if (bi->status!=BI_NEED_CHECK)
5684          return 1;  /* This block is in a checked state */
5685      
5686      checksum_count++;
5687 +
5688      if (bi->c1 || bi->c2)
5689          calc_checksum(bi,&c1,&c2);
5690      else {
5691          c1=c2=1;  /* Make sure it doesn't match */
5692 <    }
5692 >        }
5693      
5694      isgood=(c1==bi->c1 && c2==bi->c2);
5695 +
5696      if (isgood) {
5697          /* This block is still OK. So we reactivate. Of course, that
5698             means we have to move it into the needs-to-be-flushed list */
# Line 5407 | Line 5810 | static __inline__ void create_popalls(vo
5810       registers before jumping back to the various get-out routines.
5811       This generates the code for it.
5812    */
5813 <  popall_do_nothing=current_compile_p;
5813 >  align_target(align_jumps);
5814 >  popall_do_nothing=get_target();
5815    for (i=0;i<N_REGS;i++) {
5816        if (need_to_preserve[i])
5817            raw_pop_l_r(i);
5818    }
5819    raw_jmp((uae_u32)do_nothing);
5416  align_target(32);
5820    
5821 +  align_target(align_jumps);
5822    popall_execute_normal=get_target();
5823    for (i=0;i<N_REGS;i++) {
5824        if (need_to_preserve[i])
5825            raw_pop_l_r(i);
5826    }
5827    raw_jmp((uae_u32)execute_normal);
5424  align_target(32);
5828  
5829 +  align_target(align_jumps);
5830    popall_cache_miss=get_target();
5831    for (i=0;i<N_REGS;i++) {
5832        if (need_to_preserve[i])
5833            raw_pop_l_r(i);
5834    }
5835    raw_jmp((uae_u32)cache_miss);
5432  align_target(32);
5836  
5837 +  align_target(align_jumps);
5838    popall_recompile_block=get_target();
5839    for (i=0;i<N_REGS;i++) {
5840        if (need_to_preserve[i])
5841            raw_pop_l_r(i);
5842    }
5843    raw_jmp((uae_u32)recompile_block);
5844 <  align_target(32);
5845 <  
5844 >
5845 >  align_target(align_jumps);
5846    popall_exec_nostats=get_target();
5847    for (i=0;i<N_REGS;i++) {
5848        if (need_to_preserve[i])
5849            raw_pop_l_r(i);
5850    }
5851    raw_jmp((uae_u32)exec_nostats);
5852 <  align_target(32);
5853 <  
5852 >
5853 >  align_target(align_jumps);
5854    popall_check_checksum=get_target();
5855    for (i=0;i<N_REGS;i++) {
5856        if (need_to_preserve[i])
5857            raw_pop_l_r(i);
5858    }
5859    raw_jmp((uae_u32)check_checksum);
5860 <  align_target(32);
5861 <  
5860 >
5861 >  align_target(align_jumps);
5862    current_compile_p=get_target();
5863   #else
5864    popall_exec_nostats=(void *)exec_nostats;
# Line 5463 | Line 5867 | static __inline__ void create_popalls(vo
5867    popall_recompile_block=(void *)recompile_block;
5868    popall_do_nothing=(void *)do_nothing;
5869    popall_check_checksum=(void *)check_checksum;
5466  pushall_call_handler=get_target();  
5870   #endif
5871  
5872    /* And now, the code to do the matching pushes and then jump
# Line 5479 | Line 5882 | static __inline__ void create_popalls(vo
5882    raw_mov_l_rm(r,(uae_u32)&regs.pc_p);
5883    raw_and_l_ri(r,TAGMASK);
5884    raw_jmp_m_indexed((uae_u32)cache_tags,r,4);
5885 +
5886 + #ifdef X86_ASSEMBLY
5887 +  align_target(align_jumps);
5888 +  m68k_compile_execute = (void (*)(void))get_target();
5889 +  for (i=N_REGS;i--;) {
5890 +          if (need_to_preserve[i])
5891 +                  raw_push_l_r(i);
5892 +  }
5893 +  align_target(align_loops);
5894 +  uae_u32 dispatch_loop = (uae_u32)get_target();
5895 +  r=REG_PC_TMP;
5896 +  raw_mov_l_rm(r,(uae_u32)&regs.pc_p);
5897 +  raw_and_l_ri(r,TAGMASK);
5898 +  raw_call_m_indexed((uae_u32)cache_tags,r,4);
5899 +  raw_cmp_l_mi((uae_u32)&regs.spcflags,0);
5900 +  raw_jcc_b_oponly(NATIVE_CC_EQ);
5901 +  emit_byte(dispatch_loop-((uae_u32)get_target()+1));
5902 +  raw_call((uae_u32)m68k_do_specialties);
5903 +  raw_test_l_rr(REG_RESULT,REG_RESULT);
5904 +  raw_jcc_b_oponly(NATIVE_CC_EQ);
5905 +  emit_byte(dispatch_loop-((uae_u32)get_target()+1));
5906 +  raw_cmp_b_mi((uae_u32)&quit_program,0);
5907 +  raw_jcc_b_oponly(NATIVE_CC_EQ);
5908 +  emit_byte(dispatch_loop-((uae_u32)get_target()+1));
5909 +  for (i=0;i<N_REGS;i++) {
5910 +          if (need_to_preserve[i])
5911 +                  raw_pop_l_r(i);
5912 +  }
5913 +  raw_ret();
5914 + #endif
5915   }
5916  
5917   static __inline__ void reset_lists(void)
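
On X86_ASSEMBLY builds the dispatcher itself is now generated at the end of create_popalls(): callee-saved registers are pushed once, then a tight loop calls the handler selected by the tagged PC, falling out only when special flags are pending, m68k_do_specialties() returns nonzero, and quit_program is set. Roughly, the emitted code corresponds to this C sketch:

    // C equivalent of the generated m68k_compile_execute() dispatcher
    // (sketch; the real thing is emitted as machine code above).
    void m68k_compile_execute_sketch(void)
    {
        /* callee-saved registers pushed here in the generated code */
        for (;;) {
            cache_tags[cacheline(regs.pc_p)].handler();  // run current block
            if (!SPCFLAGS_TEST(SPCFLAG_ALL))
                continue;                    // fast path: next block
            if (m68k_do_specialties() == 0)
                continue;
            if (!quit_program)
                continue;
            break;
        }
        /* ...registers popped, then ret */
    }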
# Line 5496 | Line 5929 | static void prepare_block(blockinfo* bi)
5929      int i;
5930  
5931      set_target(current_compile_p);
5932 <    align_target(32);
5932 >    align_target(align_jumps);
5933      bi->direct_pen=(cpuop_func *)get_target();
5934      raw_mov_l_rm(0,(uae_u32)&(bi->pc_p));
5935      raw_mov_l_mr((uae_u32)&regs.pc_p,0);
5936      raw_jmp((uae_u32)popall_execute_normal);
5937  
5938 <    align_target(32);
5938 >    align_target(align_jumps);
5939      bi->direct_pcc=(cpuop_func *)get_target();
5940      raw_mov_l_rm(0,(uae_u32)&(bi->pc_p));
5941      raw_mov_l_mr((uae_u32)&regs.pc_p,0);
5942      raw_jmp((uae_u32)popall_check_checksum);
5510
5511    align_target(32);
5943      current_compile_p=get_target();
5944  
5945      bi->deplist=NULL;
# Line 5522 | Line 5953 | static void prepare_block(blockinfo* bi)
5953      //bi->env=empty_ss;
5954   }
5955  
5956 + static bool avoid_opcode(uae_u32 opcode)
5957 + {
5958 + #if JIT_DEBUG
5959 +        struct instr *dp = &table68k[opcode];
5960 +        // filter opcodes per type, integral value, or whatever
5961 + #endif
5962 +        return false;
5963 + }
5964 +
5965   void build_comp(void)
5966   {
5967          int i;
# Line 5561 | Line 6001 | void build_comp(void)
6001          
6002          for (i = 0; tbl[i].opcode < 65536; i++) {
6003                  int cflow = table68k[tbl[i].opcode].cflow;
6004 +                if (USE_INLINING && ((cflow & fl_const_jump) != 0))
6005 +                        cflow = fl_const_jump;
6006 +                else
6007 +                        cflow &= ~fl_const_jump;
6008                  prop[cft_map(tbl[i].opcode)].cflow = cflow;
6009  
6010                  int uses_fpu = tbl[i].specific & 32;
6011 <                if (uses_fpu && avoid_fpu)
6011 >                if ((uses_fpu && avoid_fpu) || avoid_opcode(tbl[i].opcode))
6012                          compfunctbl[cft_map(tbl[i].opcode)] = NULL;
6013                  else
6014                          compfunctbl[cft_map(tbl[i].opcode)] = tbl[i].handler;
# Line 5572 | Line 6016 | void build_comp(void)
6016  
6017      for (i = 0; nftbl[i].opcode < 65536; i++) {
6018                  int uses_fpu = tbl[i].specific & 32;
6019 <                if (uses_fpu && avoid_fpu)
6019 >                if ((uses_fpu && avoid_fpu) || avoid_opcode(nftbl[i].opcode))
6020                          nfcompfunctbl[cft_map(nftbl[i].opcode)] = NULL;
6021                  else
6022                          nfcompfunctbl[cft_map(nftbl[i].opcode)] = nftbl[i].handler;
# Line 5854 | Line 6298 | static void compile_block(cpu_history* p
6298          int r;
6299          int was_comp=0;
6300          uae_u8 liveflags[MAXRUN+1];
6301 + #if USE_CHECKSUM_INFO
6302 +        bool trace_in_rom = isinrom((uintptr)pc_hist[0].location);
6303 +        uae_u32 max_pcp=(uae_u32)pc_hist[blocklen - 1].location;
6304 +        uae_u32 min_pcp=max_pcp;
6305 + #else
6306          uae_u32 max_pcp=(uae_u32)pc_hist[0].location;
6307          uae_u32 min_pcp=max_pcp;
6308 + #endif
6309          uae_u32 cl=cacheline(pc_hist[0].location);
6310          void* specflags=(void*)&regs.spcflags;
6311          blockinfo* bi=NULL;
# Line 5899 | Line 6349 | static void compile_block(cpu_history* p
6349          remove_deps(bi); /* We are about to create new code */
6350          bi->optlevel=optlev;
6351          bi->pc_p=(uae_u8*)pc_hist[0].location;
6352 + #if USE_CHECKSUM_INFO
6353 +        free_checksum_info_chain(bi->csi);
6354 +        bi->csi = NULL;
6355 + #endif
6356          
6357          liveflags[blocklen]=0x1f; /* All flags needed afterwards */
6358          i=blocklen;
# Line 5906 | Line 6360 | static void compile_block(cpu_history* p
6360              uae_u16* currpcp=pc_hist[i].location;
6361              uae_u32 op=DO_GET_OPCODE(currpcp);
6362  
6363 + #if USE_CHECKSUM_INFO
6364 +                trace_in_rom = trace_in_rom && isinrom((uintptr)currpcp);
6365 + #if USE_INLINING
6366 +                if (is_const_jump(op)) {
6367 +                        checksum_info *csi = alloc_checksum_info();
6368 +                        csi->start_p = (uae_u8 *)min_pcp;
6369 +                        csi->length = max_pcp - min_pcp + LONGEST_68K_INST;
6370 +                        csi->next = bi->csi;
6371 +                        bi->csi = csi;
6372 +                        max_pcp = (uae_u32)currpcp;
6373 +                }
6374 + #endif
6375 +                min_pcp = (uae_u32)currpcp;
6376 + #else
6377              if ((uae_u32)currpcp<min_pcp)
6378                  min_pcp=(uae_u32)currpcp;
6379              if ((uae_u32)currpcp>max_pcp)
6380                  max_pcp=(uae_u32)currpcp;
6381 + #endif
6382  
6383                  liveflags[i]=((liveflags[i+1]&
6384                                 (~prop[op].set_flags))|
# Line 5918 | Line 6387 | static void compile_block(cpu_history* p
6387                      liveflags[i]&= ~FLAG_Z;
6388          }
6389  
6390 + #if USE_CHECKSUM_INFO
6391 +        checksum_info *csi = alloc_checksum_info();
6392 +        csi->start_p = (uae_u8 *)min_pcp;
6393 +        csi->length = max_pcp - min_pcp + LONGEST_68K_INST;
6394 +        csi->next = bi->csi;
6395 +        bi->csi = csi;
6396 + #endif
6397 +
6398          bi->needed_flags=liveflags[0];
6399  
6400 <        align_target(32);
6400 >        align_target(align_loops);
6401          was_comp=0;
6402  
6403          bi->direct_handler=(cpuop_func *)get_target();
# Line 5979 | Line 6456 | static void compile_block(cpu_history* p
6456                          comp_pc_p=(uae_u8*)pc_hist[i].location;
6457                          init_comp();
6458                      }
6459 <                    was_comp++;
6459 >                    was_comp=1;
6460  
6461                      comptbl[opcode](opcode);
6462                      freescratch();
# Line 6007 | Line 6484 | static void compile_block(cpu_history* p
6484                      raw_mov_l_mi((uae_u32)&regs.pc_p,
6485                                   (uae_u32)pc_hist[i].location);
6486                      raw_call((uae_u32)cputbl[opcode]);
6487 <                    //raw_add_l_mi((uae_u32)&oink,1); // FIXME
6487 > #if PROFILE_UNTRANSLATED_INSNS
6488 >                        // raw_cputbl_count[] is indexed with plain opcode (in m68k order)
6489 >                        raw_add_l_mi((uae_u32)&raw_cputbl_count[cft_map(opcode)],1);
6490 > #endif
6491   #if USE_NORMAL_CALLING_CONVENTION
6492                      raw_inc_sp(4);
6493   #endif
6014                    if (needed_flags) {
6015                        //raw_mov_l_mi((uae_u32)&foink3,(uae_u32)opcode+65536);
6016                    }
6017                    else {
6018                        //raw_mov_l_mi((uae_u32)&foink3,(uae_u32)opcode);
6019                    }
6494                      
6495                      if (i < blocklen - 1) {
6496                          uae_s8* branchadd;
# Line 6095 | Line 6569 | static void compile_block(cpu_history* p
6569                  raw_jmp((uae_u32)popall_do_nothing);
6570                  create_jmpdep(bi,0,tba,t1);
6571  
6572 <                align_target(16);
6572 >                align_target(align_jumps);
6573                  /* not-predicted outcome */
6574                  *branchadd=(uae_u32)get_target()-((uae_u32)branchadd+4);
6575                  live=tmp; /* Ouch again */
# Line 6164 | Line 6638 | static void compile_block(cpu_history* p
6638          big_to_small_state(&live,&(bi->env));
6639   #endif
6640  
6641 + #if USE_CHECKSUM_INFO
6642 +        remove_from_list(bi);
6643 +        if (trace_in_rom) {
6644 +                // No need to checksum that block trace on cache invalidation
6645 +                free_checksum_info_chain(bi->csi);
6646 +                bi->csi = NULL;
6647 +                add_to_dormant(bi);
6648 +        }
6649 +        else {
6650 +            calc_checksum(bi,&(bi->c1),&(bi->c2));
6651 +                add_to_active(bi);
6652 +        }
6653 + #else
6654          if (next_pc_p+extra_len>=max_pcp &&
6655              next_pc_p+extra_len<max_pcp+LONGEST_68K_INST)
6656              max_pcp=next_pc_p+extra_len;  /* extra_len covers flags magic */
6657          else
6658              max_pcp+=LONGEST_68K_INST;
6659 +
6660          bi->len=max_pcp-min_pcp;
6661          bi->min_pcp=min_pcp;
6662 <                    
6662 >        
6663          remove_from_list(bi);
6664          if (isinrom(min_pcp) && isinrom(max_pcp)) {
6665              add_to_dormant(bi); /* No need to checksum it on cache flush.
# Line 6182 | Line 6670 | static void compile_block(cpu_history* p
6670              calc_checksum(bi,&(bi->c1),&(bi->c2));
6671              add_to_active(bi);
6672          }
6673 + #endif
6674          
6675          current_cache_size += get_target() - (uae_u8 *)current_compile_p;
6676          
# Line 6201 | Line 6690 | static void compile_block(cpu_history* p
6690   #endif
6691          
6692          log_dump();
6693 <        align_target(32);
6693 >        align_target(align_jumps);
6694  
6695          /* This is the non-direct handler */
6696          bi->handler=
# Line 6217 | Line 6706 | static void compile_block(cpu_history* p
6706  
6707          raw_jmp((uae_u32)bi->direct_handler);
6708  
6220        align_target(32);
6709          current_compile_p=get_target();
6222
6710          raise_in_cl_list(bi);
6711          
6712          /* We will flush soon, anyway, so let's do it now */
# Line 6245 | Line 6732 | void exec_nostats(void)
6732   {
6733          for (;;)  {
6734                  uae_u32 opcode = GET_OPCODE;
6248 #ifdef X86_ASSEMBLY__disable
6249                __asm__ __volatile__("\tpushl %%ebp\n\tcall *%%ebx\n\tpopl %%ebp" /* FIXME */
6250                                                         : : "b" (cpufunctbl[opcode]), "a" (opcode)
6251                                                         : "%edx", "%ecx", "%esi", "%edi",  "%ebp", "memory", "cc");
6252 #else
6735                  (*cpufunctbl[opcode])(opcode);
6254 #endif
6736                  if (end_block(opcode) || SPCFLAGS_TEST(SPCFLAG_ALL)) {
6737                          return; /* We will deal with the spcflags in the caller */
6738                  }
# Line 6276 | Line 6757 | void execute_normal(void)
6757   #if FLIGHT_RECORDER
6758                          m68k_record_step(m68k_getpc());
6759   #endif
6279 #ifdef X86_ASSEMBLY__disable
6280                        __asm__ __volatile__("\tpushl %%ebp\n\tcall *%%ebx\n\tpopl %%ebp" /* FIXME */
6281                                                                 : : "b" (cpufunctbl[opcode]), "a" (opcode)
6282                                                                 : "%edx", "%ecx", "%esi", "%edi", "%ebp", "memory", "cc");
6283 #else
6760                          (*cpufunctbl[opcode])(opcode);
6285 #endif
6761                          if (end_block(opcode) || SPCFLAGS_TEST(SPCFLAG_ALL) || blocklen>=MAXRUN) {
6762                                  compile_block(pc_hist, blocklen);
6763                                  return; /* We will deal with the spcflags in the caller */
# Line 6295 | Line 6770 | void execute_normal(void)
6770  
6771   typedef void (*compiled_handler)(void);
6772  
6773 + #ifdef X86_ASSEMBLY
6774 + void (*m68k_compile_execute)(void) = NULL;
6775 + #else
6776   void m68k_do_compile_execute(void)
6777   {
6778          for (;;) {
6301 #ifdef X86_ASSEMBLY
6302                __asm__ __volatile__("\tpushl %%ebp\n\tcall *%%ebx\n\tpopl %%ebp" /* FIXME */
6303                                                         : : "b" (cache_tags[cacheline(regs.pc_p)].handler)
6304                                                         : "%edx", "%ecx", "%eax", "%esi", "%edi", "%ebp", "memory", "cc");
6305 #else
6779                  ((compiled_handler)(pushall_call_handler))();
6307 #endif
6780                  /* Whenever we return from that, we should check spcflags */
6781                  if (SPCFLAGS_TEST(SPCFLAG_ALL)) {
6782                          if (m68k_do_specialties ())
# Line 6312 | Line 6784 | void m68k_do_compile_execute(void)
6784                  }
6785          }
6786   }
6787 + #endif

Diff Legend

Removed lines
+ Added lines
< Changed lines
> Changed lines