
gpsp-kai: Commit


Commit MetaInfo

Revision: 378 (tree)
Time: 2008-03-08 10:32:42
Author: takka

Log Message

(empty log message)

Change Summary

Incremental Difference

--- trunk/gpsp-kai-test/src/cpu_new.c (nonexistent)
+++ trunk/gpsp-kai-test/src/cpu_new.c (revision 378)
@@ -0,0 +1,4557 @@
1+/* unofficial gameplaySP kai
2+ *
3+ * Copyright (C) 2006 Exophase <exophase@gmail.com>
4+ * Copyright (C) 2007 takka <takka@tfact.net>
5+ *
6+ * This program is free software; you can redistribute it and/or
7+ * modify it under the terms of the GNU General Public License as
8+ * published by the Free Software Foundation; either version 2 of
9+ * the License, or (at your option) any later version.
10+ *
11+ * This program is distributed in the hope that it will be useful,
12+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14+ * General Public License for more details.
15+ *
16+ * You should have received a copy of the GNU General Public License
17+ * along with this program; if not, write to the Free Software
18+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19+ */
20+
21+// Important todo:
22+// - stm reglist writeback when base is in the list needs adjustment
23+// - block memory needs psr swapping and user mode reg swapping
24+
25+#include "common.h"
26+
27+// Default
28+u32 idle_loop_targets = 0;
29+u32 idle_loop_target_pc[MAX_IDLE_LOOPS];
30+u32 force_pc_update_target = 0xFFFFFFFF;
31+u32 translation_gate_target_pc[MAX_TRANSLATION_GATES];
32+u32 translation_gate_targets = 0;
33+u32 iwram_stack_optimize = 1;
34+// u32 allow_smc_ram_u8 = 1;
35+// u32 allow_smc_ram_u16 = 1;
36+// u32 allow_smc_ram_u32 = 1;
37+
38+u32 reg_mode[7][7];
39+
40+// When switching modes set spsr[new_mode] to cpsr. Modifying PC as the
41+// target of a data proc instruction will set cpsr to spsr[cpu_mode].
42+u32 spsr[6];
43+
44+
45+typedef enum
46+{
47+ TRANSLATION_REGION_RAM,
48+ TRANSLATION_REGION_ROM,
49+ TRANSLATION_REGION_BIOS
50+} TRANSLATION_REGION_TYPE;
51+
52+#define ROM_TRANSLATION_CACHE_SIZE (1024 * 512 * 4)
53+#define RAM_TRANSLATION_CACHE_SIZE (1024 * 384)
54+#define BIOS_TRANSLATION_CACHE_SIZE (1024 * 128)
55+#define TRANSLATION_CACHE_LIMIT_THRESHOLD (1024)
56+#define ROM_BRANCH_HASH_SIZE (1024 * 64)
57+
58+static u32 *rom_branch_hash[ROM_BRANCH_HASH_SIZE];
59+
60+static u8 rom_translation_cache[ROM_TRANSLATION_CACHE_SIZE];
61+static u8 *rom_translation_ptr = rom_translation_cache;
62+
63+static u8 ram_translation_cache[RAM_TRANSLATION_CACHE_SIZE];
64+static u8 *ram_translation_ptr = ram_translation_cache;
65+static u32 iwram_code_min = 0xFFFFFFFF;
66+static u32 iwram_code_max = 0xFFFFFFFF;
67+static u32 ewram_code_min = 0xFFFFFFFF;
68+static u32 ewram_code_max = 0xFFFFFFFF;
69+
70+static u8 bios_translation_cache[BIOS_TRANSLATION_CACHE_SIZE];
71+static u8 *bios_translation_ptr = bios_translation_cache;
72+
73+static s32 translate_block_arm(u32 pc,
74+ TRANSLATION_REGION_TYPE translation_region, u32 smc_enable);
75+static s32 translate_block_thumb(u32 pc,
76+ TRANSLATION_REGION_TYPE translation_region, u32 smc_enable);
77+
78+static const u8 bit_count[256] =
79+{
80+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
81+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
82+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
83+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
84+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
85+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
86+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
87+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
88+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
89+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
90+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
91+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
92+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
93+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
94+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
95+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
96+};
97+
98+static const u32 psr_masks[16] =
99+{
100+ 0x00000000, 0x000000FF, 0x0000FF00, 0x0000FFFF,
101+ 0x00FF0000, 0x00FF00FF, 0x00FFFF00, 0x00FFFFFF,
102+ 0xFF000000, 0xFF0000FF, 0xFF00FF00, 0xFF00FFFF,
103+ 0xFFFF0000, 0xFFFF00FF, 0xFFFFFF00, 0xFFFFFFFF
104+};
105+
106+static const u32 cpu_modes[32] =
107+{
108+ MODE_INVALID, MODE_INVALID, MODE_INVALID, MODE_INVALID,
109+ MODE_INVALID, MODE_INVALID, MODE_INVALID, MODE_INVALID,
110+ MODE_INVALID, MODE_INVALID, MODE_INVALID, MODE_INVALID,
111+ MODE_INVALID, MODE_INVALID, MODE_INVALID, MODE_INVALID,
112+ MODE_USER, MODE_FIQ, MODE_IRQ, MODE_SUPERVISOR,
113+ MODE_INVALID, MODE_INVALID, MODE_INVALID, MODE_ABORT,
114+ MODE_INVALID, MODE_INVALID, MODE_INVALID, MODE_UNDEFINED,
115+ MODE_INVALID, MODE_INVALID, MODE_INVALID, MODE_USER
116+};
117+
118+typedef struct
119+{
120+ u8 *block_offset;
121+ u16 flag_data;
122+ u8 condition;
123+ u8 update_cycles;
124+} BLOCK_DATA_TYPE;
125+
126+typedef struct
127+{
128+ u32 branch_target;
129+ u8 *branch_source;
130+} BLOCK_EXIT_TYPE;
131+
132+
133+#define arm_decode_data_proc_reg() \
134+ u32 rn = (opcode >> 16) & 0x0F; \
135+ u32 rd = (opcode >> 12) & 0x0F; \
136+ u32 rm = opcode & 0x0F \
137+
138+#define arm_decode_data_proc_imm() \
139+ u32 rn = (opcode >> 16) & 0x0F; \
140+ u32 rd = (opcode >> 12) & 0x0F; \
141+ u32 imm; \
142+ ROR(imm, opcode & 0xFF, (opcode >> 7) & 0x1E) \
143+
144+#define arm_decode_psr_reg() \
145+ u32 psr_field = (opcode >> 16) & 0x0F; \
146+ u32 rd = (opcode >> 12) & 0x0F; \
147+ u32 rm = opcode & 0x0F \
148+
149+#define arm_decode_psr_imm() \
150+ u32 psr_field = (opcode >> 16) & 0x0F; \
151+ u32 rd = (opcode >> 12) & 0x0F; \
152+ u32 imm; \
153+ ROR(imm, opcode & 0xFF, (opcode >> 7) & 0x1E) \
154+
155+#define arm_decode_branchx() \
156+ u32 rn = opcode & 0x0F \
157+
158+#define arm_decode_multiply() \
159+ u32 rd = (opcode >> 16) & 0x0F; \
160+ u32 rn = (opcode >> 12) & 0x0F; \
161+ u32 rs = (opcode >> 8) & 0x0F; \
162+ u32 rm = opcode & 0x0F \
163+
164+#define arm_decode_multiply_long() \
165+ u32 rdhi = (opcode >> 16) & 0x0F; \
166+ u32 rdlo = (opcode >> 12) & 0x0F; \
167+ u32 rs = (opcode >> 8) & 0x0F; \
168+ u32 rm = opcode & 0x0F \
169+
170+#define arm_decode_swap() \
171+ u32 rn = (opcode >> 16) & 0x0F; \
172+ u32 rd = (opcode >> 12) & 0x0F; \
173+ u32 rm = opcode & 0x0F \
174+
175+#define arm_decode_half_trans_r() \
176+ u32 rn = (opcode >> 16) & 0x0F; \
177+ u32 rd = (opcode >> 12) & 0x0F; \
178+ u32 rm = opcode & 0x0F \
179+
180+#define arm_decode_half_trans_of() \
181+ u32 rn = (opcode >> 16) & 0x0F; \
182+ u32 rd = (opcode >> 12) & 0x0F; \
183+ u32 offset = ((opcode >> 4) & 0xF0) | (opcode & 0x0F) \
184+
185+#define arm_decode_data_trans_imm() \
186+ u32 rn = (opcode >> 16) & 0x0F; \
187+ u32 rd = (opcode >> 12) & 0x0F; \
188+ u32 offset = opcode & 0x0FFF \
189+
190+#define arm_decode_data_trans_reg() \
191+ u32 rn = (opcode >> 16) & 0x0F; \
192+ u32 rd = (opcode >> 12) & 0x0F; \
193+ u32 rm = opcode & 0x0F \
194+
195+#define arm_decode_block_trans() \
196+ u32 rn = (opcode >> 16) & 0x0F; \
197+ u32 reg_list = opcode & 0xFFFF \
198+
199+#define arm_decode_branch() \
200+ s32 offset = (s32)(opcode << 8) >> 6 \
201+
202+#define thumb_decode_shift() \
203+ u32 imm = (opcode >> 6) & 0x1F; \
204+ u32 rs = (opcode >> 3) & 0x07; \
205+ u32 rd = opcode & 0x07 \
206+
207+#define thumb_decode_add_sub() \
208+ u32 rn = (opcode >> 6) & 0x07; \
209+ u32 rs = (opcode >> 3) & 0x07; \
210+ u32 rd = opcode & 0x07 \
211+
212+#define thumb_decode_add_sub_imm() \
213+ u32 imm = (opcode >> 6) & 0x07; \
214+ u32 rs = (opcode >> 3) & 0x07; \
215+ u32 rd = opcode & 0x07 \
216+
217+#define thumb_decode_imm() \
218+ u32 imm = opcode & 0xFF \
219+
220+#define thumb_decode_alu_op() \
221+ u32 rs = (opcode >> 3) & 0x07; \
222+ u32 rd = opcode & 0x07 \
223+
224+#define thumb_decode_hireg_op() \
225+ u32 rs = (opcode >> 3) & 0x0F; \
226+ u32 rd = ((opcode >> 4) & 0x08) | (opcode & 0x07) \
227+
228+#define thumb_decode_mem_reg() \
229+ u32 ro = (opcode >> 6) & 0x07; \
230+ u32 rb = (opcode >> 3) & 0x07; \
231+ u32 rd = opcode & 0x07 \
232+
233+#define thumb_decode_mem_imm() \
234+ u32 imm = (opcode >> 6) & 0x1F; \
235+ u32 rb = (opcode >> 3) & 0x07; \
236+ u32 rd = opcode & 0x07 \
237+
238+#define thumb_decode_add_sp() \
239+ u32 imm = opcode & 0x7F \
240+
241+#define thumb_decode_rlist() \
242+ u32 reg_list = opcode & 0xFF \
243+
244+#define thumb_decode_branch_cond() \
245+ s32 offset = (s8)(opcode & 0xFF) \
246+
247+#define thumb_decode_swi() \
248+ u32 comment = opcode & 0xFF \
249+
250+#define thumb_decode_branch() \
251+ u32 offset = opcode & 0x07FF \
252+
253+
254+#include "mips_emit.h"
255+
256+#define check_pc_region(pc) \
257+ new_pc_region = (pc >> 15); \
258+ if(new_pc_region != pc_region) \
259+ { \
260+ pc_region = new_pc_region; \
261+ pc_address_block = memory_map_read[new_pc_region]; \
262+ \
263+ if(pc_address_block == NULL) \
264+ { \
265+ pc_address_block = load_gamepak_page(pc_region & 0x3FF); \
266+ } \
267+ } \
268+
269+#define translate_arm_instruction() \
270+ check_pc_region(pc); \
271+ opcode = ADDRESS32(pc_address_block, (pc & 0x7FFF)); \
272+ condition = block_data[block_data_position].condition; \
273+ \
274+ if((condition != last_condition) || (condition >= 0x20)) \
275+ { \
276+ if((last_condition & 0x0F) != 0x0E) \
277+ { \
278+ generate_branch_patch_conditional(backpatch_address, translation_ptr); \
279+ } \
280+ \
281+ last_condition = condition; \
282+ \
283+ condition &= 0x0F; \
284+ \
285+ if(condition != 0x0E) \
286+ { \
287+ arm_conditional_block_header(); \
288+ } \
289+ } \
290+ \
291+ switch((opcode >> 20) & 0xFF) \
292+ { \
293+ case 0x00: \
294+ { \
295+ if((opcode & 0x90) == 0x90) \
296+ { \
297+ if(opcode & 0x20) \
298+ { \
299+ /* STRH rd, [rn], -rm */ \
300+ arm_access_memory(store, down, post, u16, half_reg); \
301+ } \
302+ else \
303+ { \
304+ /* MUL rd, rm, rs */ \
305+ arm_multiply(no, no); \
306+ } \
307+ } \
308+ else \
309+ { \
310+ /* AND rd, rn, reg_op */ \
311+ arm_data_proc(and, reg, no_flags); \
312+ } \
313+ break; \
314+ } \
315+ \
316+ case 0x01: \
317+ { \
318+ if((opcode & 0x90) == 0x90) \
319+ { \
320+ switch((opcode >> 5) & 0x03) \
321+ { \
322+ case 0: \
323+ { \
324+ /* MULS rd, rm, rs */ \
325+ arm_multiply(no, yes); \
326+ break; \
327+ } \
328+ \
329+ case 1: \
330+ { \
331+ /* LDRH rd, [rn], -rm */ \
332+ arm_access_memory(load, down, post, u16, half_reg); \
333+ break; \
334+ } \
335+ \
336+ case 2: \
337+ { \
338+ /* LDRSB rd, [rn], -rm */ \
339+ arm_access_memory(load, down, post, s8, half_reg); \
340+ break; \
341+ } \
342+ \
343+ case 3: \
344+ { \
345+ /* LDRSH rd, [rn], -rm */ \
346+ arm_access_memory(load, down, post, s16, half_reg); \
347+ break; \
348+ } \
349+ } \
350+ } \
351+ else \
352+ { \
353+ /* ANDS rd, rn, reg_op */ \
354+ arm_data_proc(ands, reg_flags, flags); \
355+ } \
356+ break; \
357+ } \
358+ \
359+ case 0x02: \
360+ { \
361+ if((opcode & 0x90) == 0x90) \
362+ { \
363+ if(opcode & 0x20) \
364+ { \
365+ /* STRH rd, [rn], -rm */ \
366+ arm_access_memory(store, down, post, u16, half_reg); \
367+ } \
368+ else \
369+ { \
370+ /* MLA rd, rm, rs, rn */ \
371+ arm_multiply(yes, no); \
372+ } \
373+ } \
374+ else \
375+ { \
376+ /* EOR rd, rn, reg_op */ \
377+ arm_data_proc(eor, reg, no_flags); \
378+ } \
379+ break; \
380+ } \
381+ \
382+ case 0x03: \
383+ { \
384+ if((opcode & 0x90) == 0x90) \
385+ { \
386+ switch((opcode >> 5) & 0x03) \
387+ { \
388+ case 0: \
389+ { \
390+ /* MLAS rd, rm, rs, rn */ \
391+ arm_multiply(yes, yes); \
392+ break; \
393+ } \
394+ \
395+ case 1: \
396+ { \
397+ /* LDRH rd, [rn], -rm */ \
398+ arm_access_memory(load, down, post, u16, half_reg); \
399+ break; \
400+ } \
401+ \
402+ case 2: \
403+ { \
404+ /* LDRSB rd, [rn], -rm */ \
405+ arm_access_memory(load, down, post, s8, half_reg); \
406+ break; \
407+ } \
408+ \
409+ case 3: \
410+ { \
411+ /* LDRSH rd, [rn], -rm */ \
412+ arm_access_memory(load, down, post, s16, half_reg); \
413+ break; \
414+ } \
415+ } \
416+ } \
417+ else \
418+ { \
419+ /* EORS rd, rn, reg_op */ \
420+ arm_data_proc(eors, reg_flags, flags); \
421+ } \
422+ break; \
423+ } \
424+ \
425+ case 0x04: \
426+ { \
427+ if((opcode & 0x90) == 0x90) \
428+ { \
429+ /* STRH rd, [rn], -imm */ \
430+ arm_access_memory(store, down, post, u16, half_imm); \
431+ } \
432+ else \
433+ { \
434+ /* SUB rd, rn, reg_op */ \
435+ arm_data_proc(sub, reg, no_flags); \
436+ } \
437+ break; \
438+ } \
439+ \
440+ case 0x05: \
441+ { \
442+ if((opcode & 0x90) == 0x90) \
443+ { \
444+ switch((opcode >> 5) & 0x03) \
445+ { \
446+ case 1: \
447+ { \
448+ /* LDRH rd, [rn], -imm */ \
449+ arm_access_memory(load, down, post, u16, half_imm); \
450+ break; \
451+ } \
452+ \
453+ case 2: \
454+ { \
455+ /* LDRSB rd, [rn], -imm */ \
456+ arm_access_memory(load, down, post, s8, half_imm); \
457+ break; \
458+ } \
459+ \
460+ case 3: \
461+ { \
462+ /* LDRSH rd, [rn], -imm */ \
463+ arm_access_memory(load, down, post, s16, half_imm); \
464+ break; \
465+ } \
466+ } \
467+ } \
468+ else \
469+ { \
470+ /* SUBS rd, rn, reg_op */ \
471+ arm_data_proc(subs, reg, flags); \
472+ } \
473+ break; \
474+ } \
475+ \
476+ case 0x06: \
477+ { \
478+ if((opcode & 0x90) == 0x90) \
479+ { \
480+ /* STRH rd, [rn], -imm */ \
481+ arm_access_memory(store, down, post, u16, half_imm); \
482+ } \
483+ else \
484+ { \
485+ /* RSB rd, rn, reg_op */ \
486+ arm_data_proc(rsb, reg, no_flags); \
487+ } \
488+ break; \
489+ } \
490+ \
491+ case 0x07: \
492+ { \
493+ if((opcode & 0x90) == 0x90) \
494+ { \
495+ switch((opcode >> 5) & 0x03) \
496+ { \
497+ case 1: \
498+ { \
499+ /* LDRH rd, [rn], -imm */ \
500+ arm_access_memory(load, down, post, u16, half_imm); \
501+ break; \
502+ } \
503+ \
504+ case 2: \
505+ { \
506+ /* LDRSB rd, [rn], -imm */ \
507+ arm_access_memory(load, down, post, s8, half_imm); \
508+ break; \
509+ } \
510+ \
511+ case 3: \
512+ { \
513+ /* LDRSH rd, [rn], -imm */ \
514+ arm_access_memory(load, down, post, s16, half_imm); \
515+ break; \
516+ } \
517+ } \
518+ } \
519+ else \
520+ { \
521+ /* RSBS rd, rn, reg_op */ \
522+ arm_data_proc(rsbs, reg, flags); \
523+ } \
524+ break; \
525+ } \
526+ \
527+ case 0x08: \
528+ { \
529+ if((opcode & 0x90) == 0x90) \
530+ { \
531+ if(opcode & 0x20) \
532+ { \
533+ /* STRH rd, [rn], +rm */ \
534+ arm_access_memory(store, up, post, u16, half_reg); \
535+ } \
536+ else \
537+ { \
538+          /* UMULL rdlo, rdhi, rm, rs */                                  \
539+ arm_multiply_long(u64, no, no); \
540+ } \
541+ } \
542+ else \
543+ { \
544+ /* ADD rd, rn, reg_op */ \
545+ arm_data_proc(add, reg, no_flags); \
546+ } \
547+ break; \
548+ } \
549+ \
550+ case 0x09: \
551+ { \
552+ if((opcode & 0x90) == 0x90) \
553+ { \
554+ switch((opcode >> 5) & 0x03) \
555+ { \
556+ case 0: \
557+ { \
558+ /* UMULLS rdlo, rdhi, rm, rs */ \
559+ arm_multiply_long(u64, no, yes); \
560+ break; \
561+ } \
562+ \
563+ case 1: \
564+ { \
565+ /* LDRH rd, [rn], +rm */ \
566+ arm_access_memory(load, up, post, u16, half_reg); \
567+ break; \
568+ } \
569+ \
570+ case 2: \
571+ { \
572+ /* LDRSB rd, [rn], +rm */ \
573+ arm_access_memory(load, up, post, s8, half_reg); \
574+ break; \
575+ } \
576+ \
577+ case 3: \
578+ { \
579+ /* LDRSH rd, [rn], +rm */ \
580+ arm_access_memory(load, up, post, s16, half_reg); \
581+ break; \
582+ } \
583+ } \
584+ } \
585+ else \
586+ { \
587+ /* ADDS rd, rn, reg_op */ \
588+ arm_data_proc(adds, reg, flags); \
589+ } \
590+ break; \
591+ } \
592+ \
593+ case 0x0A: \
594+ { \
595+ if((opcode & 0x90) == 0x90) \
596+ { \
597+ if(opcode & 0x20) \
598+ { \
599+ /* STRH rd, [rn], +rm */ \
600+ arm_access_memory(store, up, post, u16, half_reg); \
601+ } \
602+ else \
603+ { \
604+          /* UMLAL rdlo, rdhi, rm, rs */                                  \
605+ arm_multiply_long(u64_add, yes, no); \
606+ } \
607+ } \
608+ else \
609+ { \
610+ /* ADC rd, rn, reg_op */ \
611+ arm_data_proc(adc, reg, no_flags); \
612+ } \
613+ break; \
614+ } \
615+ \
616+ case 0x0B: \
617+ { \
618+ if((opcode & 0x90) == 0x90) \
619+ { \
620+ switch((opcode >> 5) & 0x03) \
621+ { \
622+ case 0: \
623+ { \
624+ /* UMLALS rdlo, rdhi, rm, rs */ \
625+ arm_multiply_long(u64_add, yes, yes); \
626+ break; \
627+ } \
628+ \
629+ case 1: \
630+ { \
631+ /* LDRH rd, [rn], +rm */ \
632+ arm_access_memory(load, up, post, u16, half_reg); \
633+ break; \
634+ } \
635+ \
636+ case 2: \
637+ { \
638+ /* LDRSB rd, [rn], +rm */ \
639+ arm_access_memory(load, up, post, s8, half_reg); \
640+ break; \
641+ } \
642+ \
643+ case 3: \
644+ { \
645+ /* LDRSH rd, [rn], +rm */ \
646+ arm_access_memory(load, up, post, s16, half_reg); \
647+ break; \
648+ } \
649+ } \
650+ } \
651+ else \
652+ { \
653+ /* ADCS rd, rn, reg_op */ \
654+ arm_data_proc(adcs, reg, flags); \
655+ } \
656+ break; \
657+ } \
658+ \
659+ case 0x0C: \
660+ { \
661+ if((opcode & 0x90) == 0x90) \
662+ { \
663+ if(opcode & 0x20) \
664+ { \
665+ /* STRH rd, [rn], +imm */ \
666+ arm_access_memory(store, up, post, u16, half_imm); \
667+ } \
668+ else \
669+ { \
670+          /* SMULL rdlo, rdhi, rm, rs */                                  \
671+ arm_multiply_long(s64, no, no); \
672+ } \
673+ } \
674+ else \
675+ { \
676+ /* SBC rd, rn, reg_op */ \
677+ arm_data_proc(sbc, reg, no_flags); \
678+ } \
679+ break; \
680+ } \
681+ \
682+ case 0x0D: \
683+ { \
684+ if((opcode & 0x90) == 0x90) \
685+ { \
686+ switch((opcode >> 5) & 0x03) \
687+ { \
688+ case 0: \
689+ { \
690+ /* SMULLS rdlo, rdhi, rm, rs */ \
691+ arm_multiply_long(s64, no, yes); \
692+ break; \
693+ } \
694+ \
695+ case 1: \
696+ { \
697+ /* LDRH rd, [rn], +imm */ \
698+ arm_access_memory(load, up, post, u16, half_imm); \
699+ break; \
700+ } \
701+ \
702+ case 2: \
703+ { \
704+ /* LDRSB rd, [rn], +imm */ \
705+ arm_access_memory(load, up, post, s8, half_imm); \
706+ break; \
707+ } \
708+ \
709+ case 3: \
710+ { \
711+ /* LDRSH rd, [rn], +imm */ \
712+ arm_access_memory(load, up, post, s16, half_imm); \
713+ break; \
714+ } \
715+ } \
716+ } \
717+ else \
718+ { \
719+ /* SBCS rd, rn, reg_op */ \
720+ arm_data_proc(sbcs, reg, flags); \
721+ } \
722+ break; \
723+ } \
724+ \
725+ case 0x0E: \
726+ { \
727+ if((opcode & 0x90) == 0x90) \
728+ { \
729+ if(opcode & 0x20) \
730+ { \
731+ /* STRH rd, [rn], +imm */ \
732+ arm_access_memory(store, up, post, u16, half_imm); \
733+ } \
734+ else \
735+ { \
736+          /* SMLAL rdlo, rdhi, rm, rs */                                  \
737+ arm_multiply_long(s64_add, yes, no); \
738+ } \
739+ } \
740+ else \
741+ { \
742+ /* RSC rd, rn, reg_op */ \
743+ arm_data_proc(rsc, reg, no_flags); \
744+ } \
745+ break; \
746+ } \
747+ \
748+ case 0x0F: \
749+ { \
750+ if((opcode & 0x90) == 0x90) \
751+ { \
752+ switch((opcode >> 5) & 0x03) \
753+ { \
754+ case 0: \
755+ { \
756+ /* SMLALS rdlo, rdhi, rm, rs */ \
757+ arm_multiply_long(s64_add, yes, yes); \
758+ break; \
759+ } \
760+ \
761+ case 1: \
762+ { \
763+ /* LDRH rd, [rn], +imm */ \
764+ arm_access_memory(load, up, post, u16, half_imm); \
765+ break; \
766+ } \
767+ \
768+ case 2: \
769+ { \
770+ /* LDRSB rd, [rn], +imm */ \
771+ arm_access_memory(load, up, post, s8, half_imm); \
772+ break; \
773+ } \
774+ \
775+ case 3: \
776+ { \
777+ /* LDRSH rd, [rn], +imm */ \
778+ arm_access_memory(load, up, post, s16, half_imm); \
779+ break; \
780+ } \
781+ } \
782+ } \
783+ else \
784+ { \
785+ /* RSCS rd, rn, reg_op */ \
786+ arm_data_proc(rscs, reg, flags); \
787+ } \
788+ break; \
789+ } \
790+ \
791+ case 0x10: \
792+ { \
793+ if((opcode & 0x90) == 0x90) \
794+ { \
795+ if(opcode & 0x20) \
796+ { \
797+ /* STRH rd, [rn - rm] */ \
798+ arm_access_memory(store, down, pre, u16, half_reg); \
799+ } \
800+ else \
801+ { \
802+ /* SWP rd, rm, [rn] */ \
803+ arm_swap(u32); \
804+ } \
805+ } \
806+ else \
807+ { \
808+ /* MRS rd, cpsr */ \
809+ arm_psr(reg, read, cpsr); \
810+ } \
811+ break; \
812+ } \
813+ \
814+ case 0x11: \
815+ { \
816+ if((opcode & 0x90) == 0x90) \
817+ { \
818+ switch((opcode >> 5) & 0x03) \
819+ { \
820+ case 1: \
821+ { \
822+ /* LDRH rd, [rn - rm] */ \
823+ arm_access_memory(load, down, pre, u16, half_reg); \
824+ break; \
825+ } \
826+ \
827+ case 2: \
828+ { \
829+ /* LDRSB rd, [rn - rm] */ \
830+ arm_access_memory(load, down, pre, s8, half_reg); \
831+ break; \
832+ } \
833+ \
834+ case 3: \
835+ { \
836+ /* LDRSH rd, [rn - rm] */ \
837+ arm_access_memory(load, down, pre, s16, half_reg); \
838+ break; \
839+ } \
840+ } \
841+ } \
842+ else \
843+ { \
844+        /* TST rn, reg_op */                                              \
845+ arm_data_proc_test(tst, reg_flags); \
846+ } \
847+ break; \
848+ } \
849+ \
850+ case 0x12: \
851+ { \
852+ if((opcode & 0x90) == 0x90) \
853+ { \
854+ /* STRH rd, [rn - rm]! */ \
855+ arm_access_memory(store, down, pre_wb, u16, half_reg); \
856+ } \
857+ else \
858+ { \
859+ if(opcode & 0x10) \
860+ { \
861+ /* BX rn */ \
862+ arm_bx(); \
863+ } \
864+ else \
865+ { \
866+ /* MSR cpsr, rm */ \
867+ arm_psr(reg, store, cpsr); \
868+ } \
869+ } \
870+ break; \
871+ } \
872+ \
873+ case 0x13: \
874+ { \
875+ if((opcode & 0x90) == 0x90) \
876+ { \
877+ switch((opcode >> 5) & 0x03) \
878+ { \
879+ case 1: \
880+ { \
881+ /* LDRH rd, [rn - rm]! */ \
882+ arm_access_memory(load, down, pre_wb, u16, half_reg); \
883+ break; \
884+ } \
885+ \
886+ case 2: \
887+ { \
888+ /* LDRSB rd, [rn - rm]! */ \
889+ arm_access_memory(load, down, pre_wb, s8, half_reg); \
890+ break; \
891+ } \
892+ \
893+ case 3: \
894+ { \
895+ /* LDRSH rd, [rn - rm]! */ \
896+ arm_access_memory(load, down, pre_wb, s16, half_reg); \
897+ break; \
898+ } \
899+ } \
900+ } \
901+ else \
902+ { \
903+        /* TEQ rn, reg_op */                                              \
904+ arm_data_proc_test(teq, reg_flags); \
905+ } \
906+ break; \
907+ } \
908+ \
909+ case 0x14: \
910+ { \
911+ if((opcode & 0x90) == 0x90) \
912+ { \
913+ if(opcode & 0x20) \
914+ { \
915+ /* STRH rd, [rn - imm] */ \
916+ arm_access_memory(store, down, pre, u16, half_imm); \
917+ } \
918+ else \
919+ { \
920+ /* SWPB rd, rm, [rn] */ \
921+ arm_swap(u8); \
922+ } \
923+ } \
924+ else \
925+ { \
926+ /* MRS rd, spsr */ \
927+ arm_psr(reg, read, spsr); \
928+ } \
929+ break; \
930+ } \
931+ \
932+ case 0x15: \
933+ { \
934+ if((opcode & 0x90) == 0x90) \
935+ { \
936+ switch((opcode >> 5) & 0x03) \
937+ { \
938+ case 1: \
939+ { \
940+ /* LDRH rd, [rn - imm] */ \
941+ arm_access_memory(load, down, pre, u16, half_imm); \
942+ break; \
943+ } \
944+ \
945+ case 2: \
946+ { \
947+ /* LDRSB rd, [rn - imm] */ \
948+ arm_access_memory(load, down, pre, s8, half_imm); \
949+ break; \
950+ } \
951+ \
952+ case 3: \
953+ { \
954+ /* LDRSH rd, [rn - imm] */ \
955+ arm_access_memory(load, down, pre, s16, half_imm); \
956+ break; \
957+ } \
958+ } \
959+ } \
960+ else \
961+ { \
962+ /* CMP rn, reg_op */ \
963+ arm_data_proc_test(cmp, reg); \
964+ } \
965+ break; \
966+ } \
967+ \
968+ case 0x16: \
969+ { \
970+ if((opcode & 0x90) == 0x90) \
971+ { \
972+ /* STRH rd, [rn - imm]! */ \
973+ arm_access_memory(store, down, pre_wb, u16, half_imm); \
974+ } \
975+ else \
976+ { \
977+ /* MSR spsr, rm */ \
978+ arm_psr(reg, store, spsr); \
979+ } \
980+ break; \
981+ } \
982+ \
983+ case 0x17: \
984+ { \
985+ if((opcode & 0x90) == 0x90) \
986+ { \
987+ switch((opcode >> 5) & 0x03) \
988+ { \
989+ case 1: \
990+ { \
991+ /* LDRH rd, [rn - imm]! */ \
992+ arm_access_memory(load, down, pre_wb, u16, half_imm); \
993+ break; \
994+ } \
995+ \
996+ case 2: \
997+ { \
998+ /* LDRSB rd, [rn - imm]! */ \
999+ arm_access_memory(load, down, pre_wb, s8, half_imm); \
1000+ break; \
1001+ } \
1002+ \
1003+ case 3: \
1004+ { \
1005+ /* LDRSH rd, [rn - imm]! */ \
1006+ arm_access_memory(load, down, pre_wb, s16, half_imm); \
1007+ break; \
1008+ } \
1009+ } \
1010+ } \
1011+ else \
1012+ { \
1013+        /* CMN rn, reg_op */                                             \
1014+ arm_data_proc_test(cmn, reg); \
1015+ } \
1016+ break; \
1017+ } \
1018+ \
1019+ case 0x18: \
1020+ { \
1021+ if((opcode & 0x90) == 0x90) \
1022+ { \
1023+ /* STRH rd, [rn + rm] */ \
1024+ arm_access_memory(store, up, pre, u16, half_reg); \
1025+ } \
1026+ else \
1027+ { \
1028+ /* ORR rd, rn, reg_op */ \
1029+ arm_data_proc(orr, reg, no_flags); \
1030+ } \
1031+ break; \
1032+ } \
1033+ \
1034+ case 0x19: \
1035+ { \
1036+ if((opcode & 0x90) == 0x90) \
1037+ { \
1038+ switch((opcode >> 5) & 0x03) \
1039+ { \
1040+ case 1: \
1041+ { \
1042+ /* LDRH rd, [rn + rm] */ \
1043+ arm_access_memory(load, up, pre, u16, half_reg); \
1044+ break; \
1045+ } \
1046+ \
1047+ case 2: \
1048+ { \
1049+ /* LDRSB rd, [rn + rm] */ \
1050+ arm_access_memory(load, up, pre, s8, half_reg); \
1051+ break; \
1052+ } \
1053+ \
1054+ case 3: \
1055+ { \
1056+ /* LDRSH rd, [rn + rm] */ \
1057+ arm_access_memory(load, up, pre, s16, half_reg); \
1058+ break; \
1059+ } \
1060+ } \
1061+ } \
1062+ else \
1063+ { \
1064+ /* ORRS rd, rn, reg_op */ \
1065+ arm_data_proc(orrs, reg_flags, flags); \
1066+ } \
1067+ break; \
1068+ } \
1069+ \
1070+ case 0x1A: \
1071+ { \
1072+ if((opcode & 0x90) == 0x90) \
1073+ { \
1074+ /* STRH rd, [rn + rm]! */ \
1075+ arm_access_memory(store, up, pre_wb, u16, half_reg); \
1076+ } \
1077+ else \
1078+ { \
1079+ /* MOV rd, reg_op */ \
1080+ arm_data_proc_unary(mov, reg, no_flags); \
1081+ } \
1082+ break; \
1083+ } \
1084+ \
1085+ case 0x1B: \
1086+ { \
1087+ if((opcode & 0x90) == 0x90) \
1088+ { \
1089+ switch((opcode >> 5) & 0x03) \
1090+ { \
1091+ case 1: \
1092+ { \
1093+ /* LDRH rd, [rn + rm]! */ \
1094+ arm_access_memory(load, up, pre_wb, u16, half_reg); \
1095+ break; \
1096+ } \
1097+ \
1098+ case 2: \
1099+ { \
1100+ /* LDRSB rd, [rn + rm]! */ \
1101+ arm_access_memory(load, up, pre_wb, s8, half_reg); \
1102+ break; \
1103+ } \
1104+ \
1105+ case 3: \
1106+ { \
1107+ /* LDRSH rd, [rn + rm]! */ \
1108+ arm_access_memory(load, up, pre_wb, s16, half_reg); \
1109+ break; \
1110+ } \
1111+ } \
1112+ } \
1113+ else \
1114+ { \
1115+ /* MOVS rd, reg_op */ \
1116+ arm_data_proc_unary(movs, reg_flags, flags); \
1117+ } \
1118+ break; \
1119+ } \
1120+ \
1121+ case 0x1C: \
1122+ { \
1123+ if((opcode & 0x90) == 0x90) \
1124+ { \
1125+ /* STRH rd, [rn + imm] */ \
1126+ arm_access_memory(store, up, pre, u16, half_imm); \
1127+ } \
1128+ else \
1129+ { \
1130+ /* BIC rd, rn, reg_op */ \
1131+ arm_data_proc(bic, reg, no_flags); \
1132+ } \
1133+ break; \
1134+ } \
1135+ \
1136+ case 0x1D: \
1137+ { \
1138+ if((opcode & 0x90) == 0x90) \
1139+ { \
1140+ switch((opcode >> 5) & 0x03) \
1141+ { \
1142+ case 1: \
1143+ { \
1144+ /* LDRH rd, [rn + imm] */ \
1145+ arm_access_memory(load, up, pre, u16, half_imm); \
1146+ break; \
1147+ } \
1148+ \
1149+ case 2: \
1150+ { \
1151+ /* LDRSB rd, [rn + imm] */ \
1152+ arm_access_memory(load, up, pre, s8, half_imm); \
1153+ break; \
1154+ } \
1155+ \
1156+ case 3: \
1157+ { \
1158+ /* LDRSH rd, [rn + imm] */ \
1159+ arm_access_memory(load, up, pre, s16, half_imm); \
1160+ break; \
1161+ } \
1162+ } \
1163+ } \
1164+ else \
1165+ { \
1166+ /* BICS rd, rn, reg_op */ \
1167+ arm_data_proc(bics, reg_flags, flags); \
1168+ } \
1169+ break; \
1170+ } \
1171+ \
1172+ case 0x1E: \
1173+ { \
1174+ if((opcode & 0x90) == 0x90) \
1175+ { \
1176+ /* STRH rd, [rn + imm]! */ \
1177+ arm_access_memory(store, up, pre_wb, u16, half_imm); \
1178+ } \
1179+ else \
1180+ { \
1181+ /* MVN rd, reg_op */ \
1182+ arm_data_proc_unary(mvn, reg, no_flags); \
1183+ } \
1184+ break; \
1185+ } \
1186+ \
1187+ case 0x1F: \
1188+ { \
1189+ if((opcode & 0x90) == 0x90) \
1190+ { \
1191+ switch((opcode >> 5) & 0x03) \
1192+ { \
1193+ case 1: \
1194+ { \
1195+ /* LDRH rd, [rn + imm]! */ \
1196+ arm_access_memory(load, up, pre_wb, u16, half_imm); \
1197+ break; \
1198+ } \
1199+ \
1200+ case 2: \
1201+ { \
1202+ /* LDRSB rd, [rn + imm]! */ \
1203+ arm_access_memory(load, up, pre_wb, s8, half_imm); \
1204+ break; \
1205+ } \
1206+ \
1207+ case 3: \
1208+ { \
1209+ /* LDRSH rd, [rn + imm]! */ \
1210+ arm_access_memory(load, up, pre_wb, s16, half_imm); \
1211+ break; \
1212+ } \
1213+ } \
1214+ } \
1215+ else \
1216+ { \
1217+        /* MVNS rd, reg_op */                                            \
1218+ arm_data_proc_unary(mvns, reg_flags, flags); \
1219+ } \
1220+ break; \
1221+ } \
1222+ \
1223+ case 0x20: \
1224+ { \
1225+ /* AND rd, rn, imm */ \
1226+ arm_data_proc(and, imm, no_flags); \
1227+ break; \
1228+ } \
1229+ \
1230+ case 0x21: \
1231+ { \
1232+ /* ANDS rd, rn, imm */ \
1233+ arm_data_proc(ands, imm_flags, flags); \
1234+ break; \
1235+ } \
1236+ \
1237+ case 0x22: \
1238+ { \
1239+ /* EOR rd, rn, imm */ \
1240+ arm_data_proc(eor, imm, no_flags); \
1241+ break; \
1242+ } \
1243+ \
1244+ case 0x23: \
1245+ { \
1246+ /* EORS rd, rn, imm */ \
1247+ arm_data_proc(eors, imm_flags, flags); \
1248+ break; \
1249+ } \
1250+ \
1251+ case 0x24: \
1252+ { \
1253+ /* SUB rd, rn, imm */ \
1254+ arm_data_proc(sub, imm, no_flags); \
1255+ break; \
1256+ } \
1257+ \
1258+ case 0x25: \
1259+ { \
1260+ /* SUBS rd, rn, imm */ \
1261+ arm_data_proc(subs, imm, flags); \
1262+ break; \
1263+ } \
1264+ \
1265+ case 0x26: \
1266+ { \
1267+ /* RSB rd, rn, imm */ \
1268+ arm_data_proc(rsb, imm, no_flags); \
1269+ break; \
1270+ } \
1271+ \
1272+ case 0x27: \
1273+ { \
1274+ /* RSBS rd, rn, imm */ \
1275+ arm_data_proc(rsbs, imm, flags); \
1276+ break; \
1277+ } \
1278+ \
1279+ case 0x28: \
1280+ { \
1281+ /* ADD rd, rn, imm */ \
1282+ arm_data_proc(add, imm, no_flags); \
1283+ break; \
1284+ } \
1285+ \
1286+ case 0x29: \
1287+ { \
1288+ /* ADDS rd, rn, imm */ \
1289+ arm_data_proc(adds, imm, flags); \
1290+ break; \
1291+ } \
1292+ \
1293+ case 0x2A: \
1294+ { \
1295+ /* ADC rd, rn, imm */ \
1296+ arm_data_proc(adc, imm, no_flags); \
1297+ break; \
1298+ } \
1299+ \
1300+ case 0x2B: \
1301+ { \
1302+ /* ADCS rd, rn, imm */ \
1303+ arm_data_proc(adcs, imm, flags); \
1304+ break; \
1305+ } \
1306+ \
1307+ case 0x2C: \
1308+ { \
1309+ /* SBC rd, rn, imm */ \
1310+ arm_data_proc(sbc, imm, no_flags); \
1311+ break; \
1312+ } \
1313+ \
1314+ case 0x2D: \
1315+ { \
1316+ /* SBCS rd, rn, imm */ \
1317+ arm_data_proc(sbcs, imm, flags); \
1318+ break; \
1319+ } \
1320+ \
1321+ case 0x2E: \
1322+ { \
1323+ /* RSC rd, rn, imm */ \
1324+ arm_data_proc(rsc, imm, no_flags); \
1325+ break; \
1326+ } \
1327+ \
1328+ case 0x2F: \
1329+ { \
1330+ /* RSCS rd, rn, imm */ \
1331+ arm_data_proc(rscs, imm, flags); \
1332+ break; \
1333+ } \
1334+ \
1335+ case 0x30: \
1336+ case 0x31: \
1337+ { \
1338+ /* TST rn, imm */ \
1339+ arm_data_proc_test(tst, imm); \
1340+ break; \
1341+ } \
1342+ \
1343+ case 0x32: \
1344+ { \
1345+ /* MSR cpsr, imm */ \
1346+ arm_psr(imm, store, cpsr); \
1347+ break; \
1348+ } \
1349+ \
1350+ case 0x33: \
1351+ { \
1352+ /* TEQ rn, imm */ \
1353+ arm_data_proc_test(teq, imm); \
1354+ break; \
1355+ } \
1356+ \
1357+ case 0x34: \
1358+ case 0x35: \
1359+ { \
1360+ /* CMP rn, imm */ \
1361+ arm_data_proc_test(cmp, imm); \
1362+ break; \
1363+ } \
1364+ \
1365+ case 0x36: \
1366+ { \
1367+ /* MSR spsr, imm */ \
1368+ arm_psr(imm, store, spsr); \
1369+ break; \
1370+ } \
1371+ \
1372+ case 0x37: \
1373+ { \
1374+ /* CMN rn, imm */ \
1375+ arm_data_proc_test(cmn, imm); \
1376+ break; \
1377+ } \
1378+ \
1379+ case 0x38: \
1380+ { \
1381+ /* ORR rd, rn, imm */ \
1382+ arm_data_proc(orr, imm, no_flags); \
1383+ break; \
1384+ } \
1385+ \
1386+ case 0x39: \
1387+ { \
1388+ /* ORRS rd, rn, imm */ \
1389+ arm_data_proc(orrs, imm_flags, flags); \
1390+ break; \
1391+ } \
1392+ \
1393+ case 0x3A: \
1394+ { \
1395+ /* MOV rd, imm */ \
1396+ arm_data_proc_unary(mov, imm, no_flags); \
1397+ break; \
1398+ } \
1399+ \
1400+ case 0x3B: \
1401+ { \
1402+ /* MOVS rd, imm */ \
1403+ arm_data_proc_unary(movs, imm_flags, flags); \
1404+ break; \
1405+ } \
1406+ \
1407+ case 0x3C: \
1408+ { \
1409+ /* BIC rd, rn, imm */ \
1410+ arm_data_proc(bic, imm, no_flags); \
1411+ break; \
1412+ } \
1413+ \
1414+ case 0x3D: \
1415+ { \
1416+ /* BICS rd, rn, imm */ \
1417+ arm_data_proc(bics, imm_flags, flags); \
1418+ break; \
1419+ } \
1420+ \
1421+ case 0x3E: \
1422+ { \
1423+ /* MVN rd, imm */ \
1424+ arm_data_proc_unary(mvn, imm, no_flags); \
1425+ break; \
1426+ } \
1427+ \
1428+ case 0x3F: \
1429+ { \
1430+ /* MVNS rd, imm */ \
1431+ arm_data_proc_unary(mvns, imm_flags, flags); \
1432+ break; \
1433+ } \
1434+ \
1435+ case 0x40: \
1436+ { \
1437+ /* STR rd, [rn], -imm */ \
1438+ arm_access_memory(store, down, post, u32, imm); \
1439+ break; \
1440+ } \
1441+ \
1442+ case 0x41: \
1443+ { \
1444+ /* LDR rd, [rn], -imm */ \
1445+ arm_access_memory(load, down, post, u32, imm); \
1446+ break; \
1447+ } \
1448+ \
1449+ case 0x42: \
1450+ { \
1451+ /* STRT rd, [rn], -imm */ \
1452+ arm_access_memory(store, down, post, u32, imm); \
1453+ break; \
1454+ } \
1455+ \
1456+ case 0x43: \
1457+ { \
1458+ /* LDRT rd, [rn], -imm */ \
1459+ arm_access_memory(load, down, post, u32, imm); \
1460+ break; \
1461+ } \
1462+ \
1463+ case 0x44: \
1464+ { \
1465+ /* STRB rd, [rn], -imm */ \
1466+ arm_access_memory(store, down, post, u8, imm); \
1467+ break; \
1468+ } \
1469+ \
1470+ case 0x45: \
1471+ { \
1472+ /* LDRB rd, [rn], -imm */ \
1473+ arm_access_memory(load, down, post, u8, imm); \
1474+ break; \
1475+ } \
1476+ \
1477+ case 0x46: \
1478+ { \
1479+ /* STRBT rd, [rn], -imm */ \
1480+ arm_access_memory(store, down, post, u8, imm); \
1481+ break; \
1482+ } \
1483+ \
1484+ case 0x47: \
1485+ { \
1486+ /* LDRBT rd, [rn], -imm */ \
1487+ arm_access_memory(load, down, post, u8, imm); \
1488+ break; \
1489+ } \
1490+ \
1491+ case 0x48: \
1492+ { \
1493+ /* STR rd, [rn], +imm */ \
1494+ arm_access_memory(store, up, post, u32, imm); \
1495+ break; \
1496+ } \
1497+ \
1498+ case 0x49: \
1499+ { \
1500+ /* LDR rd, [rn], +imm */ \
1501+ arm_access_memory(load, up, post, u32, imm); \
1502+ break; \
1503+ } \
1504+ \
1505+ case 0x4A: \
1506+ { \
1507+ /* STRT rd, [rn], +imm */ \
1508+ arm_access_memory(store, up, post, u32, imm); \
1509+ break; \
1510+ } \
1511+ \
1512+ case 0x4B: \
1513+ { \
1514+ /* LDRT rd, [rn], +imm */ \
1515+ arm_access_memory(load, up, post, u32, imm); \
1516+ break; \
1517+ } \
1518+ \
1519+ case 0x4C: \
1520+ { \
1521+ /* STRB rd, [rn], +imm */ \
1522+ arm_access_memory(store, up, post, u8, imm); \
1523+ break; \
1524+ } \
1525+ \
1526+ case 0x4D: \
1527+ { \
1528+ /* LDRB rd, [rn], +imm */ \
1529+ arm_access_memory(load, up, post, u8, imm); \
1530+ break; \
1531+ } \
1532+ \
1533+ case 0x4E: \
1534+ { \
1535+ /* STRBT rd, [rn], +imm */ \
1536+ arm_access_memory(store, up, post, u8, imm); \
1537+ break; \
1538+ } \
1539+ \
1540+ case 0x4F: \
1541+ { \
1542+ /* LDRBT rd, [rn], +imm */ \
1543+ arm_access_memory(load, up, post, u8, imm); \
1544+ break; \
1545+ } \
1546+ \
1547+ case 0x50: \
1548+ { \
1549+ /* STR rd, [rn - imm] */ \
1550+ arm_access_memory(store, down, pre, u32, imm); \
1551+ break; \
1552+ } \
1553+ \
1554+ case 0x51: \
1555+ { \
1556+ /* LDR rd, [rn - imm] */ \
1557+ arm_access_memory(load, down, pre, u32, imm); \
1558+ break; \
1559+ } \
1560+ \
1561+ case 0x52: \
1562+ { \
1563+ /* STR rd, [rn - imm]! */ \
1564+ arm_access_memory(store, down, pre_wb, u32, imm); \
1565+ break; \
1566+ } \
1567+ \
1568+ case 0x53: \
1569+ { \
1570+ /* LDR rd, [rn - imm]! */ \
1571+ arm_access_memory(load, down, pre_wb, u32, imm); \
1572+ break; \
1573+ } \
1574+ \
1575+ case 0x54: \
1576+ { \
1577+ /* STRB rd, [rn - imm] */ \
1578+ arm_access_memory(store, down, pre, u8, imm); \
1579+ break; \
1580+ } \
1581+ \
1582+ case 0x55: \
1583+ { \
1584+ /* LDRB rd, [rn - imm] */ \
1585+ arm_access_memory(load, down, pre, u8, imm); \
1586+ break; \
1587+ } \
1588+ \
1589+ case 0x56: \
1590+ { \
1591+ /* STRB rd, [rn - imm]! */ \
1592+ arm_access_memory(store, down, pre_wb, u8, imm); \
1593+ break; \
1594+ } \
1595+ \
1596+ case 0x57: \
1597+ { \
1598+ /* LDRB rd, [rn - imm]! */ \
1599+ arm_access_memory(load, down, pre_wb, u8, imm); \
1600+ break; \
1601+ } \
1602+ \
1603+ case 0x58: \
1604+ { \
1605+ /* STR rd, [rn + imm] */ \
1606+ arm_access_memory(store, up, pre, u32, imm); \
1607+ break; \
1608+ } \
1609+ \
1610+ case 0x59: \
1611+ { \
1612+ /* LDR rd, [rn + imm] */ \
1613+ arm_access_memory(load, up, pre, u32, imm); \
1614+ break; \
1615+ } \
1616+ \
1617+ case 0x5A: \
1618+ { \
1619+ /* STR rd, [rn + imm]! */ \
1620+ arm_access_memory(store, up, pre_wb, u32, imm); \
1621+ break; \
1622+ } \
1623+ \
1624+ case 0x5B: \
1625+ { \
1626+ /* LDR rd, [rn + imm]! */ \
1627+ arm_access_memory(load, up, pre_wb, u32, imm); \
1628+ break; \
1629+ } \
1630+ \
1631+ case 0x5C: \
1632+ { \
1633+ /* STRB rd, [rn + imm] */ \
1634+ arm_access_memory(store, up, pre, u8, imm); \
1635+ break; \
1636+ } \
1637+ \
1638+ case 0x5D: \
1639+ { \
1640+ /* LDRB rd, [rn + imm] */ \
1641+ arm_access_memory(load, up, pre, u8, imm); \
1642+ break; \
1643+ } \
1644+ \
1645+ case 0x5E: \
1646+ { \
1647+ /* STRB rd, [rn + imm]! */ \
1648+ arm_access_memory(store, up, pre_wb, u8, imm); \
1649+ break; \
1650+ } \
1651+ \
1652+ case 0x5F: \
1653+ { \
1654+      /* LDRB rd, [rn + imm]! */                                         \
1655+ arm_access_memory(load, up, pre_wb, u8, imm); \
1656+ break; \
1657+ } \
1658+ \
1659+ case 0x60: \
1660+ { \
1661+ /* STR rd, [rn], -rm */ \
1662+ arm_access_memory(store, down, post, u32, reg); \
1663+ break; \
1664+ } \
1665+ \
1666+ case 0x61: \
1667+ { \
1668+ /* LDR rd, [rn], -rm */ \
1669+ arm_access_memory(load, down, post, u32, reg); \
1670+ break; \
1671+ } \
1672+ \
1673+ case 0x62: \
1674+ { \
1675+ /* STRT rd, [rn], -rm */ \
1676+ arm_access_memory(store, down, post, u32, reg); \
1677+ break; \
1678+ } \
1679+ \
1680+ case 0x63: \
1681+ { \
1682+ /* LDRT rd, [rn], -rm */ \
1683+ arm_access_memory(load, down, post, u32, reg); \
1684+ break; \
1685+ } \
1686+ \
1687+ case 0x64: \
1688+ { \
1689+ /* STRB rd, [rn], -rm */ \
1690+ arm_access_memory(store, down, post, u8, reg); \
1691+ break; \
1692+ } \
1693+ \
1694+ case 0x65: \
1695+ { \
1696+ /* LDRB rd, [rn], -rm */ \
1697+ arm_access_memory(load, down, post, u8, reg); \
1698+ break; \
1699+ } \
1700+ \
1701+ case 0x66: \
1702+ { \
1703+ /* STRBT rd, [rn], -rm */ \
1704+ arm_access_memory(store, down, post, u8, reg); \
1705+ break; \
1706+ } \
1707+ \
1708+ case 0x67: \
1709+ { \
1710+ /* LDRBT rd, [rn], -rm */ \
1711+ arm_access_memory(load, down, post, u8, reg); \
1712+ break; \
1713+ } \
1714+ \
1715+ case 0x68: \
1716+ { \
1717+ /* STR rd, [rn], +rm */ \
1718+ arm_access_memory(store, up, post, u32, reg); \
1719+ break; \
1720+ } \
1721+ \
1722+ case 0x69: \
1723+ { \
1724+ /* LDR rd, [rn], +rm */ \
1725+ arm_access_memory(load, up, post, u32, reg); \
1726+ break; \
1727+ } \
1728+ \
1729+ case 0x6A: \
1730+ { \
1731+ /* STRT rd, [rn], +rm */ \
1732+ arm_access_memory(store, up, post, u32, reg); \
1733+ break; \
1734+ } \
1735+ \
1736+ case 0x6B: \
1737+ { \
1738+ /* LDRT rd, [rn], +rm */ \
1739+ arm_access_memory(load, up, post, u32, reg); \
1740+ break; \
1741+ } \
1742+ \
1743+ case 0x6C: \
1744+ { \
1745+ /* STRB rd, [rn], +rm */ \
1746+ arm_access_memory(store, up, post, u8, reg); \
1747+ break; \
1748+ } \
1749+ \
1750+ case 0x6D: \
1751+ { \
1752+ /* LDRB rd, [rn], +rm */ \
1753+ arm_access_memory(load, up, post, u8, reg); \
1754+ break; \
1755+ } \
1756+ \
1757+ case 0x6E: \
1758+ { \
1759+ /* STRBT rd, [rn], +rm */ \
1760+ arm_access_memory(store, up, post, u8, reg); \
1761+ break; \
1762+ } \
1763+ \
1764+ case 0x6F: \
1765+ { \
1766+ /* LDRBT rd, [rn], +rm */ \
1767+ arm_access_memory(load, up, post, u8, reg); \
1768+ break; \
1769+ } \
1770+ \
1771+ case 0x70: \
1772+ { \
1773+ /* STR rd, [rn - rm] */ \
1774+ arm_access_memory(store, down, pre, u32, reg); \
1775+ break; \
1776+ } \
1777+ \
1778+ case 0x71: \
1779+ { \
1780+ /* LDR rd, [rn - rm] */ \
1781+ arm_access_memory(load, down, pre, u32, reg); \
1782+ break; \
1783+ } \
1784+ \
1785+ case 0x72: \
1786+ { \
1787+ /* STR rd, [rn - rm]! */ \
1788+ arm_access_memory(store, down, pre_wb, u32, reg); \
1789+ break; \
1790+ } \
1791+ \
1792+ case 0x73: \
1793+ { \
1794+ /* LDR rd, [rn - rm]! */ \
1795+ arm_access_memory(load, down, pre_wb, u32, reg); \
1796+ break; \
1797+ } \
1798+ \
1799+ case 0x74: \
1800+ { \
1801+ /* STRB rd, [rn - rm] */ \
1802+ arm_access_memory(store, down, pre, u8, reg); \
1803+ break; \
1804+ } \
1805+ \
1806+ case 0x75: \
1807+ { \
1808+ /* LDRB rd, [rn - rm] */ \
1809+ arm_access_memory(load, down, pre, u8, reg); \
1810+ break; \
1811+ } \
1812+ \
1813+ case 0x76: \
1814+ { \
1815+ /* STRB rd, [rn - rm]! */ \
1816+ arm_access_memory(store, down, pre_wb, u8, reg); \
1817+ break; \
1818+ } \
1819+ \
1820+ case 0x77: \
1821+ { \
1822+ /* LDRB rd, [rn - rm]! */ \
1823+ arm_access_memory(load, down, pre_wb, u8, reg); \
1824+ break; \
1825+ } \
1826+ \
1827+ case 0x78: \
1828+ { \
1829+ /* STR rd, [rn + rm] */ \
1830+ arm_access_memory(store, up, pre, u32, reg); \
1831+ break; \
1832+ } \
1833+ \
1834+ case 0x79: \
1835+ { \
1836+ /* LDR rd, [rn + rm] */ \
1837+ arm_access_memory(load, up, pre, u32, reg); \
1838+ break; \
1839+ } \
1840+ \
1841+ case 0x7A: \
1842+ { \
1843+ /* STR rd, [rn + rm]! */ \
1844+ arm_access_memory(store, up, pre_wb, u32, reg); \
1845+ break; \
1846+ } \
1847+ \
1848+ case 0x7B: \
1849+ { \
1850+ /* LDR rd, [rn + rm]! */ \
1851+ arm_access_memory(load, up, pre_wb, u32, reg); \
1852+ break; \
1853+ } \
1854+ \
1855+ case 0x7C: \
1856+ { \
1857+ /* STRB rd, [rn + rm] */ \
1858+ arm_access_memory(store, up, pre, u8, reg); \
1859+ break; \
1860+ } \
1861+ \
1862+ case 0x7D: \
1863+ { \
1864+ /* LDRB rd, [rn + rm] */ \
1865+ arm_access_memory(load, up, pre, u8, reg); \
1866+ break; \
1867+ } \
1868+ \
1869+ case 0x7E: \
1870+ { \
1871+ /* STRB rd, [rn + rm]! */ \
1872+ arm_access_memory(store, up, pre_wb, u8, reg); \
1873+ break; \
1874+ } \
1875+ \
1876+ case 0x7F: \
1877+ { \
1878+      /* LDRB rd, [rn + rm]! */                                          \
1879+ arm_access_memory(load, up, pre_wb, u8, reg); \
1880+ break; \
1881+ } \
1882+ \
1883+ case 0x80: \
1884+ { \
1885+ /* STMDA rn, rlist */ \
1886+ arm_block_memory(store, down_a, no, no); \
1887+ break; \
1888+ } \
1889+ \
1890+ case 0x81: \
1891+ { \
1892+ /* LDMDA rn, rlist */ \
1893+ arm_block_memory(load, down_a, no, no); \
1894+ break; \
1895+ } \
1896+ \
1897+ case 0x82: \
1898+ { \
1899+ /* STMDA rn!, rlist */ \
1900+ arm_block_memory(store, down_a, down, no); \
1901+ break; \
1902+ } \
1903+ \
1904+ case 0x83: \
1905+ { \
1906+ /* LDMDA rn!, rlist */ \
1907+ arm_block_memory(load, down_a, down, no); \
1908+ break; \
1909+ } \
1910+ \
1911+ case 0x84: \
1912+ { \
1913+ /* STMDA rn, rlist^ */ \
1914+ arm_block_memory(store, down_a, no, yes); \
1915+ break; \
1916+ } \
1917+ \
1918+ case 0x85: \
1919+ { \
1920+ /* LDMDA rn, rlist^ */ \
1921+ arm_block_memory(load, down_a, no, yes); \
1922+ break; \
1923+ } \
1924+ \
1925+ case 0x86: \
1926+ { \
1927+ /* STMDA rn!, rlist^ */ \
1928+ arm_block_memory(store, down_a, down, yes); \
1929+ break; \
1930+ } \
1931+ \
1932+ case 0x87: \
1933+ { \
1934+ /* LDMDA rn!, rlist^ */ \
1935+ arm_block_memory(load, down_a, down, yes); \
1936+ break; \
1937+ } \
1938+ \
1939+ case 0x88: \
1940+ { \
1941+ /* STMIA rn, rlist */ \
1942+ arm_block_memory(store, no, no, no); \
1943+ break; \
1944+ } \
1945+ \
1946+ case 0x89: \
1947+ { \
1948+ /* LDMIA rn, rlist */ \
1949+ arm_block_memory(load, no, no, no); \
1950+ break; \
1951+ } \
1952+ \
1953+ case 0x8A: \
1954+ { \
1955+ /* STMIA rn!, rlist */ \
1956+ arm_block_memory(store, no, up, no); \
1957+ break; \
1958+ } \
1959+ \
1960+ case 0x8B: \
1961+ { \
1962+ /* LDMIA rn!, rlist */ \
1963+ arm_block_memory(load, no, up, no); \
1964+ break; \
1965+ } \
1966+ \
1967+ case 0x8C: \
1968+ { \
1969+ /* STMIA rn, rlist^ */ \
1970+ arm_block_memory(store, no, no, yes); \
1971+ break; \
1972+ } \
1973+ \
1974+ case 0x8D: \
1975+ { \
1976+ /* LDMIA rn, rlist^ */ \
1977+ arm_block_memory(load, no, no, yes); \
1978+ break; \
1979+ } \
1980+ \
1981+ case 0x8E: \
1982+ { \
1983+ /* STMIA rn!, rlist^ */ \
1984+ arm_block_memory(store, no, up, yes); \
1985+ break; \
1986+ } \
1987+ \
1988+ case 0x8F: \
1989+ { \
1990+ /* LDMIA rn!, rlist^ */ \
1991+ arm_block_memory(load, no, up, yes); \
1992+ break; \
1993+ } \
1994+ \
1995+ case 0x90: \
1996+ { \
1997+ /* STMDB rn, rlist */ \
1998+ arm_block_memory(store, down_b, no, no); \
1999+ break; \
2000+ } \
2001+ \
2002+ case 0x91: \
2003+ { \
2004+ /* LDMDB rn, rlist */ \
2005+ arm_block_memory(load, down_b, no, no); \
2006+ break; \
2007+ } \
2008+ \
2009+ case 0x92: \
2010+ { \
2011+ /* STMDB rn!, rlist */ \
2012+ arm_block_memory(store, down_b, down, no); \
2013+ break; \
2014+ } \
2015+ \
2016+ case 0x93: \
2017+ { \
2018+ /* LDMDB rn!, rlist */ \
2019+ arm_block_memory(load, down_b, down, no); \
2020+ break; \
2021+ } \
2022+ \
2023+ case 0x94: \
2024+ { \
2025+ /* STMDB rn, rlist^ */ \
2026+ arm_block_memory(store, down_b, no, yes); \
2027+ break; \
2028+ } \
2029+ \
2030+ case 0x95: \
2031+ { \
2032+ /* LDMDB rn, rlist^ */ \
2033+ arm_block_memory(load, down_b, no, yes); \
2034+ break; \
2035+ } \
2036+ \
2037+ case 0x96: \
2038+ { \
2039+ /* STMDB rn!, rlist^ */ \
2040+ arm_block_memory(store, down_b, down, yes); \
2041+ break; \
2042+ } \
2043+ \
2044+ case 0x97: \
2045+ { \
2046+ /* LDMDB rn!, rlist^ */ \
2047+ arm_block_memory(load, down_b, down, yes); \
2048+ break; \
2049+ } \
2050+ \
2051+ case 0x98: \
2052+ { \
2053+ /* STMIB rn, rlist */ \
2054+ arm_block_memory(store, up, no, no); \
2055+ break; \
2056+ } \
2057+ \
2058+ case 0x99: \
2059+ { \
2060+ /* LDMIB rn, rlist */ \
2061+ arm_block_memory(load, up, no, no); \
2062+ break; \
2063+ } \
2064+ \
2065+ case 0x9A: \
2066+ { \
2067+ /* STMIB rn!, rlist */ \
2068+ arm_block_memory(store, up, up, no); \
2069+ break; \
2070+ } \
2071+ \
2072+ case 0x9B: \
2073+ { \
2074+ /* LDMIB rn!, rlist */ \
2075+ arm_block_memory(load, up, up, no); \
2076+ break; \
2077+ } \
2078+ \
2079+ case 0x9C: \
2080+ { \
2081+ /* STMIB rn, rlist^ */ \
2082+ arm_block_memory(store, up, no, yes); \
2083+ break; \
2084+ } \
2085+ \
2086+ case 0x9D: \
2087+ { \
2088+ /* LDMIB rn, rlist^ */ \
2089+ arm_block_memory(load, up, no, yes); \
2090+ break; \
2091+ } \
2092+ \
2093+ case 0x9E: \
2094+ { \
2095+ /* STMIB rn!, rlist^ */ \
2096+ arm_block_memory(store, up, up, yes); \
2097+ break; \
2098+ } \
2099+ \
2100+ case 0x9F: \
2101+ { \
2102+ /* LDMIB rn!, rlist^ */ \
2103+ arm_block_memory(load, up, up, yes); \
2104+ break; \
2105+ } \
2106+ \
2107+ case 0xA0: case 0xA1: case 0xA2: case 0xA3: \
2108+ case 0xA4: case 0xA5: case 0xA6: case 0xA7: \
2109+ case 0xA8: case 0xA9: case 0xAA: case 0xAB: \
2110+ case 0xAC: case 0xAD: case 0xAE: case 0xAF: \
2111+ { \
2112+ /* B offset */ \
2113+ arm_b(); \
2114+ break; \
2115+ } \
2116+ \
2117+ case 0xB0: case 0xB1: case 0xB2: case 0xB3: \
2118+ case 0xB4: case 0xB5: case 0xB6: case 0xB7: \
2119+ case 0xB8: case 0xB9: case 0xBA: case 0xBB: \
2120+ case 0xBC: case 0xBD: case 0xBE: case 0xBF: \
2121+ { \
2122+ /* BL offset */ \
2123+ arm_bl(); \
2124+ break; \
2125+ } \
2126+ \
2127+ case 0xC0: case 0xC1: case 0xC2: case 0xC3: \
2128+ case 0xC4: case 0xC5: case 0xC6: case 0xC7: \
2129+ case 0xC8: case 0xC9: case 0xCA: case 0xCB: \
2130+ case 0xCC: case 0xCD: case 0xCE: case 0xCF: \
2131+ case 0xD0: case 0xD1: case 0xD2: case 0xD3: \
2132+ case 0xD4: case 0xD5: case 0xD6: case 0xD7: \
2133+ case 0xD8: case 0xD9: case 0xDA: case 0xDB: \
2134+ case 0xDC: case 0xDD: case 0xDE: case 0xDF: \
2135+ case 0xE0: case 0xE1: case 0xE2: case 0xE3: \
2136+ case 0xE4: case 0xE5: case 0xE6: case 0xE7: \
2137+ case 0xE8: case 0xE9: case 0xEA: case 0xEB: \
2138+ case 0xEC: case 0xED: case 0xEE: case 0xEF: \
2139+ /* coprocessor instructions, reserved on GBA */ \
2140+ break; \
2141+ \
2142+ case 0xF0: case 0xF1: case 0xF2: case 0xF3: \
2143+ case 0xF4: case 0xF5: case 0xF6: case 0xF7: \
2144+ case 0xF8: case 0xF9: case 0xFA: case 0xFB: \
2145+ case 0xFC: case 0xFD: case 0xFE: case 0xFF: \
2146+ { \
2147+ /* SWI comment */ \
2148+ arm_swi(); \
2149+ break; \
2150+ } \
2151+ } \
2152+ \
2153+ pc += 4 \
2154+
2155+
2156+#define arm_flag_status() \
2157+
2158+#define translate_thumb_instruction() \
2159+ flag_status = block_data[block_data_position].flag_data; \
2160+ check_pc_region(pc); \
2161+ last_opcode = opcode; \
2162+ opcode = ADDRESS16(pc_address_block, (pc & 0x7FFF)); \
2163+ \
2164+ switch((opcode >> 8) & 0xFF) \
2165+ { \
2166+ case 0x00: case 0x01: case 0x02: case 0x03: \
2167+ case 0x04: case 0x05: case 0x06: case 0x07: \
2168+ { \
2169+ /* LSL rd, rs, imm */ \
2170+ thumb_shift(shift, lsl, imm); \
2171+ break; \
2172+ } \
2173+ \
2174+ case 0x08: case 0x09: case 0x0A: case 0x0B: \
2175+ case 0x0C: case 0x0D: case 0x0E: case 0x0F: \
2176+ { \
2177+ /* LSR rd, rs, imm */ \
2178+ thumb_shift(shift, lsr, imm); \
2179+ break; \
2180+ } \
2181+ \
2182+ case 0x10: case 0x11: case 0x12: case 0x13: \
2183+ case 0x14: case 0x15: case 0x16: case 0x17: \
2184+ { \
2185+ /* ASR rd, rs, imm */ \
2186+ thumb_shift(shift, asr, imm); \
2187+ break; \
2188+ } \
2189+ \
2190+ case 0x18: \
2191+ case 0x19: \
2192+ { \
2193+ /* ADD rd, rs, rn */ \
2194+ thumb_data_proc(add_sub, adds, reg, rd, rs, rn); \
2195+ break; \
2196+ } \
2197+ \
2198+ case 0x1A: \
2199+ case 0x1B: \
2200+ { \
2201+ /* SUB rd, rs, rn */ \
2202+ thumb_data_proc(add_sub, subs, reg, rd, rs, rn); \
2203+ break; \
2204+ } \
2205+ \
2206+ case 0x1C: \
2207+ case 0x1D: \
2208+ { \
2209+ /* ADD rd, rs, imm */ \
2210+ thumb_data_proc(add_sub_imm, adds, imm, rd, rs, imm); \
2211+ break; \
2212+ } \
2213+ \
2214+ case 0x1E: \
2215+ case 0x1F: \
2216+ { \
2217+ /* SUB rd, rs, imm */ \
2218+ thumb_data_proc(add_sub_imm, subs, imm, rd, rs, imm); \
2219+ break; \
2220+ } \
2221+ \
2222+ case 0x20: \
2223+ { \
2224+ /* MOV r0, imm */ \
2225+ thumb_data_proc_unary(imm, movs, imm, 0, imm); \
2226+ break; \
2227+ } \
2228+ \
2229+ case 0x21: \
2230+ { \
2231+ /* MOV r1, imm */ \
2232+ thumb_data_proc_unary(imm, movs, imm, 1, imm); \
2233+ break; \
2234+ } \
2235+ \
2236+ case 0x22: \
2237+ { \
2238+ /* MOV r2, imm */ \
2239+ thumb_data_proc_unary(imm, movs, imm, 2, imm); \
2240+ break; \
2241+ } \
2242+ \
2243+ case 0x23: \
2244+ { \
2245+ /* MOV r3, imm */ \
2246+ thumb_data_proc_unary(imm, movs, imm, 3, imm); \
2247+ break; \
2248+ } \
2249+ \
2250+ case 0x24: \
2251+ { \
2252+ /* MOV r4, imm */ \
2253+ thumb_data_proc_unary(imm, movs, imm, 4, imm); \
2254+ break; \
2255+ } \
2256+ \
2257+ case 0x25: \
2258+ { \
2259+ /* MOV r5, imm */ \
2260+ thumb_data_proc_unary(imm, movs, imm, 5, imm); \
2261+ break; \
2262+ } \
2263+ \
2264+ case 0x26: \
2265+ { \
2266+ /* MOV r6, imm */ \
2267+ thumb_data_proc_unary(imm, movs, imm, 6, imm); \
2268+ break; \
2269+ } \
2270+ \
2271+ case 0x27: \
2272+ { \
2273+ /* MOV r7, imm */ \
2274+ thumb_data_proc_unary(imm, movs, imm, 7, imm); \
2275+ break; \
2276+ } \
2277+ \
2278+ case 0x28: \
2279+ { \
2280+ /* CMP r0, imm */ \
2281+ thumb_data_proc_test(imm, cmp, imm, 0, imm); \
2282+ break; \
2283+ } \
2284+ \
2285+ case 0x29: \
2286+ { \
2287+ /* CMP r1, imm */ \
2288+ thumb_data_proc_test(imm, cmp, imm, 1, imm); \
2289+ break; \
2290+ } \
2291+ \
2292+ case 0x2A: \
2293+ { \
2294+ /* CMP r2, imm */ \
2295+ thumb_data_proc_test(imm, cmp, imm, 2, imm); \
2296+ break; \
2297+ } \
2298+ \
2299+ case 0x2B: \
2300+ { \
2301+ /* CMP r3, imm */ \
2302+ thumb_data_proc_test(imm, cmp, imm, 3, imm); \
2303+ break; \
2304+ } \
2305+ \
2306+ case 0x2C: \
2307+ { \
2308+ /* CMP r4, imm */ \
2309+ thumb_data_proc_test(imm, cmp, imm, 4, imm); \
2310+ break; \
2311+ } \
2312+ \
2313+ case 0x2D: \
2314+ { \
2315+ /* CMP r5, imm */ \
2316+ thumb_data_proc_test(imm, cmp, imm, 5, imm); \
2317+ break; \
2318+ } \
2319+ \
2320+ case 0x2E: \
2321+ { \
2322+ /* CMP r6, imm */ \
2323+ thumb_data_proc_test(imm, cmp, imm, 6, imm); \
2324+ break; \
2325+ } \
2326+ \
2327+ case 0x2F: \
2328+ { \
2329+ /* CMP r7, imm */ \
2330+ thumb_data_proc_test(imm, cmp, imm, 7, imm); \
2331+ break; \
2332+ } \
2333+ \
2334+ case 0x30: \
2335+ { \
2336+ /* ADD r0, imm */ \
2337+ thumb_data_proc(imm, adds, imm, 0, 0, imm); \
2338+ break; \
2339+ } \
2340+ \
2341+ case 0x31: \
2342+ { \
2343+ /* ADD r1, imm */ \
2344+ thumb_data_proc(imm, adds, imm, 1, 1, imm); \
2345+ break; \
2346+ } \
2347+ \
2348+ case 0x32: \
2349+ { \
2350+ /* ADD r2, imm */ \
2351+ thumb_data_proc(imm, adds, imm, 2, 2, imm); \
2352+ break; \
2353+ } \
2354+ \
2355+ case 0x33: \
2356+ { \
2357+ /* ADD r3, imm */ \
2358+ thumb_data_proc(imm, adds, imm, 3, 3, imm); \
2359+ break; \
2360+ } \
2361+ \
2362+ case 0x34: \
2363+ { \
2364+ /* ADD r4, imm */ \
2365+ thumb_data_proc(imm, adds, imm, 4, 4, imm); \
2366+ break; \
2367+ } \
2368+ \
2369+ case 0x35: \
2370+ { \
2371+ /* ADD r5, imm */ \
2372+ thumb_data_proc(imm, adds, imm, 5, 5, imm); \
2373+ break; \
2374+ } \
2375+ \
2376+ case 0x36: \
2377+ { \
2378+ /* ADD r6, imm */ \
2379+ thumb_data_proc(imm, adds, imm, 6, 6, imm); \
2380+ break; \
2381+ } \
2382+ \
2383+ case 0x37: \
2384+ { \
2385+ /* ADD r7, imm */ \
2386+ thumb_data_proc(imm, adds, imm, 7, 7, imm); \
2387+ break; \
2388+ } \
2389+ \
2390+ case 0x38: \
2391+ { \
2392+ /* SUB r0, imm */ \
2393+ thumb_data_proc(imm, subs, imm, 0, 0, imm); \
2394+ break; \
2395+ } \
2396+ \
2397+ case 0x39: \
2398+ { \
2399+ /* SUB r1, imm */ \
2400+ thumb_data_proc(imm, subs, imm, 1, 1, imm); \
2401+ break; \
2402+ } \
2403+ \
2404+ case 0x3A: \
2405+ { \
2406+ /* SUB r2, imm */ \
2407+ thumb_data_proc(imm, subs, imm, 2, 2, imm); \
2408+ break; \
2409+ } \
2410+ \
2411+ case 0x3B: \
2412+ { \
2413+ /* SUB r3, imm */ \
2414+ thumb_data_proc(imm, subs, imm, 3, 3, imm); \
2415+ break; \
2416+ } \
2417+ \
2418+ case 0x3C: \
2419+ { \
2420+ /* SUB r4, imm */ \
2421+ thumb_data_proc(imm, subs, imm, 4, 4, imm); \
2422+ break; \
2423+ } \
2424+ \
2425+ case 0x3D: \
2426+ { \
2427+ /* SUB r5, imm */ \
2428+ thumb_data_proc(imm, subs, imm, 5, 5, imm); \
2429+ break; \
2430+ } \
2431+ \
2432+ case 0x3E: \
2433+ { \
2434+ /* SUB r6, imm */ \
2435+ thumb_data_proc(imm, subs, imm, 6, 6, imm); \
2436+ break; \
2437+ } \
2438+ \
2439+ case 0x3F: \
2440+ { \
2441+ /* SUB r7, imm */ \
2442+ thumb_data_proc(imm, subs, imm, 7, 7, imm); \
2443+ break; \
2444+ } \
2445+ \
2446+ case 0x40: \
2447+ { \
2448+ switch((opcode >> 6) & 0x03) \
2449+ { \
2450+ case 0x00: \
2451+ { \
2452+ /* AND rd, rs */ \
2453+ thumb_data_proc(alu_op, ands, reg, rd, rd, rs); \
2454+ break; \
2455+ } \
2456+ \
2457+ case 0x01: \
2458+ { \
2459+ /* EOR rd, rs */ \
2460+ thumb_data_proc(alu_op, eors, reg, rd, rd, rs); \
2461+ break; \
2462+ } \
2463+ \
2464+ case 0x02: \
2465+ { \
2466+ /* LSL rd, rs */ \
2467+ thumb_shift(alu_op, lsl, reg); \
2468+ break; \
2469+ } \
2470+ \
2471+ case 0x03: \
2472+ { \
2473+ /* LSR rd, rs */ \
2474+ thumb_shift(alu_op, lsr, reg); \
2475+ break; \
2476+ } \
2477+ } \
2478+ break; \
2479+ } \
2480+ \
2481+ case 0x41: \
2482+ { \
2483+ switch((opcode >> 6) & 0x03) \
2484+ { \
2485+ case 0x00: \
2486+ { \
2487+ /* ASR rd, rs */ \
2488+ thumb_shift(alu_op, asr, reg); \
2489+ break; \
2490+ } \
2491+ \
2492+ case 0x01: \
2493+ { \
2494+ /* ADC rd, rs */ \
2495+ thumb_data_proc(alu_op, adcs, reg, rd, rd, rs); \
2496+ break; \
2497+ } \
2498+ \
2499+ case 0x02: \
2500+ { \
2501+ /* SBC rd, rs */ \
2502+ thumb_data_proc(alu_op, sbcs, reg, rd, rd, rs); \
2503+ break; \
2504+ } \
2505+ \
2506+ case 0x03: \
2507+ { \
2508+ /* ROR rd, rs */ \
2509+ thumb_shift(alu_op, ror, reg); \
2510+ break; \
2511+ } \
2512+ } \
2513+ break; \
2514+ } \
2515+ \
2516+ case 0x42: \
2517+ { \
2518+ switch((opcode >> 6) & 0x03) \
2519+ { \
2520+ case 0x00: \
2521+ { \
2522+ /* TST rd, rs */ \
2523+ thumb_data_proc_test(alu_op, tst, reg, rd, rs); \
2524+ break; \
2525+ } \
2526+ \
2527+ case 0x01: \
2528+ { \
2529+ /* NEG rd, rs */ \
2530+ thumb_data_proc_unary(alu_op, neg, reg, rd, rs); \
2531+ break; \
2532+ } \
2533+ \
2534+ case 0x02: \
2535+ { \
2536+ /* CMP rd, rs */ \
2537+ thumb_data_proc_test(alu_op, cmp, reg, rd, rs); \
2538+ break; \
2539+ } \
2540+ \
2541+ case 0x03: \
2542+ { \
2543+ /* CMN rd, rs */ \
2544+ thumb_data_proc_test(alu_op, cmn, reg, rd, rs); \
2545+ break; \
2546+ } \
2547+ } \
2548+ break; \
2549+ } \
2550+ \
2551+ case 0x43: \
2552+ { \
2553+ switch((opcode >> 6) & 0x03) \
2554+ { \
2555+ case 0x00: \
2556+ { \
2557+ /* ORR rd, rs */ \
2558+ thumb_data_proc(alu_op, orrs, reg, rd, rd, rs); \
2559+ break; \
2560+ } \
2561+ \
2562+ case 0x01: \
2563+ { \
2564+ /* MUL rd, rs */ \
2565+ /* thumb_data_proc(alu_op, muls, reg, rd, rd, rs); */ \
2566+ thumb_data_proc_muls(alu_op, reg, rd, rd, rs); \
2567+ break; \
2568+ } \
2569+ \
2570+ case 0x02: \
2571+ { \
2572+ /* BIC rd, rs */ \
2573+ thumb_data_proc(alu_op, bics, reg, rd, rd, rs); \
2574+ break; \
2575+ } \
2576+ \
2577+ case 0x03: \
2578+ { \
2579+ /* MVN rd, rs */ \
2580+ thumb_data_proc_unary(alu_op, mvns, reg, rd, rs); \
2581+ break; \
2582+ } \
2583+ } \
2584+ break; \
2585+ } \
2586+ \
2587+ case 0x44: \
2588+ { \
2589+ /* ADD rd, rs */ \
2590+ thumb_data_proc_hi(add); \
2591+ break; \
2592+ } \
2593+ \
2594+ case 0x45: \
2595+ { \
2596+ /* CMP rd, rs */ \
2597+ thumb_data_proc_test_hi(cmp); \
2598+ break; \
2599+ } \
2600+ \
2601+ case 0x46: \
2602+ { \
2603+ /* MOV rd, rs */ \
2604+ thumb_data_proc_mov_hi(); \
2605+ break; \
2606+ } \
2607+ \
2608+ case 0x47: \
2609+ { \
2610+ /* BX rs */ \
2611+ thumb_bx(); \
2612+ break; \
2613+ } \
2614+ \
2615+ case 0x48: \
2616+ { \
2617+ /* LDR r0, [pc + imm] */ \
2618+ thumb_access_memory(load, imm, 0, 0, 0, pc_relative, \
2619+ ((pc & ~2) + (imm << 2) + 4), u32); \
2620+ break; \
2621+ } \
2622+ \
2623+ case 0x49: \
2624+ { \
2625+ /* LDR r1, [pc + imm] */ \
2626+ thumb_access_memory(load, imm, 1, 0, 0, pc_relative, \
2627+ ((pc & ~2) + (imm << 2) + 4), u32); \
2628+ break; \
2629+ } \
2630+ \
2631+ case 0x4A: \
2632+ { \
2633+ /* LDR r2, [pc + imm] */ \
2634+ thumb_access_memory(load, imm, 2, 0, 0, pc_relative, \
2635+ ((pc & ~2) + (imm << 2) + 4), u32); \
2636+ break; \
2637+ } \
2638+ \
2639+ case 0x4B: \
2640+ { \
2641+ /* LDR r3, [pc + imm] */ \
2642+ thumb_access_memory(load, imm, 3, 0, 0, pc_relative, \
2643+ ((pc & ~2) + (imm << 2) + 4), u32); \
2644+ break; \
2645+ } \
2646+ \
2647+ case 0x4C: \
2648+ { \
2649+ /* LDR r4, [pc + imm] */ \
2650+ thumb_access_memory(load, imm, 4, 0, 0, pc_relative, \
2651+ ((pc & ~2) + (imm << 2) + 4), u32); \
2652+ break; \
2653+ } \
2654+ \
2655+ case 0x4D: \
2656+ { \
2657+ /* LDR r5, [pc + imm] */ \
2658+ thumb_access_memory(load, imm, 5, 0, 0, pc_relative, \
2659+ ((pc & ~2) + (imm << 2) + 4), u32); \
2660+ break; \
2661+ } \
2662+ \
2663+ case 0x4E: \
2664+ { \
2665+ /* LDR r6, [pc + imm] */ \
2666+ thumb_access_memory(load, imm, 6, 0, 0, pc_relative, \
2667+ ((pc & ~2) + (imm << 2) + 4), u32); \
2668+ break; \
2669+ } \
2670+ \
2671+ case 0x4F: \
2672+ { \
2673+ /* LDR r7, [pc + imm] */ \
2674+ thumb_access_memory(load, imm, 7, 0, 0, pc_relative, \
2675+ ((pc & ~2) + (imm << 2) + 4), u32); \
2676+ break; \
2677+ } \
2678+ \
2679+ case 0x50: \
2680+ case 0x51: \
2681+ { \
2682+ /* STR rd, [rb + ro] */ \
2683+ thumb_access_memory(store, mem_reg, rd, rb, ro, reg_reg, 0, u32); \
2684+ break; \
2685+ } \
2686+ \
2687+ case 0x52: \
2688+ case 0x53: \
2689+ { \
2690+ /* STRH rd, [rb + ro] */ \
2691+ thumb_access_memory(store, mem_reg, rd, rb, ro, reg_reg, 0, u16); \
2692+ break; \
2693+ } \
2694+ \
2695+ case 0x54: \
2696+ case 0x55: \
2697+ { \
2698+ /* STRB rd, [rb + ro] */ \
2699+ thumb_access_memory(store, mem_reg, rd, rb, ro, reg_reg, 0, u8); \
2700+ break; \
2701+ } \
2702+ \
2703+ case 0x56: \
2704+ case 0x57: \
2705+ { \
2706+ /* LDSB rd, [rb + ro] */ \
2707+ thumb_access_memory(load, mem_reg, rd, rb, ro, reg_reg, 0, s8); \
2708+ break; \
2709+ } \
2710+ \
2711+ case 0x58: \
2712+ case 0x59: \
2713+ { \
2714+ /* LDR rd, [rb + ro] */ \
2715+ thumb_access_memory(load, mem_reg, rd, rb, ro, reg_reg, 0, u32); \
2716+ break; \
2717+ } \
2718+ \
2719+ case 0x5A: \
2720+ case 0x5B: \
2721+ { \
2722+ /* LDRH rd, [rb + ro] */ \
2723+ thumb_access_memory(load, mem_reg, rd, rb, ro, reg_reg, 0, u16); \
2724+ break; \
2725+ } \
2726+ \
2727+ case 0x5C: \
2728+ case 0x5D: \
2729+ { \
2730+ /* LDRB rd, [rb + ro] */ \
2731+ thumb_access_memory(load, mem_reg, rd, rb, ro, reg_reg, 0, u8); \
2732+ break; \
2733+ } \
2734+ \
2735+ case 0x5E: \
2736+ case 0x5F: \
2737+ { \
2738+ /* LDSH rd, [rb + ro] */ \
2739+ thumb_access_memory(load, mem_reg, rd, rb, ro, reg_reg, 0, s16); \
2740+ break; \
2741+ } \
2742+ \
2743+ case 0x60: case 0x61: case 0x62: case 0x63: \
2744+ case 0x64: case 0x65: case 0x66: case 0x67: \
2745+ { \
2746+ /* STR rd, [rb + imm] */ \
2747+ thumb_access_memory(store, mem_imm, rd, rb, 0, reg_imm, (imm << 2), \
2748+ u32); \
2749+ break; \
2750+ } \
2751+ \
2752+ case 0x68: case 0x69: case 0x6A: case 0x6B: \
2753+ case 0x6C: case 0x6D: case 0x6E: case 0x6F: \
2754+ { \
2755+ /* LDR rd, [rb + imm] */ \
2756+ thumb_access_memory(load, mem_imm, rd, rb, 0, reg_imm, (imm << 2), u32);\
2757+ break; \
2758+ } \
2759+ \
2760+ case 0x70: case 0x71: case 0x72: case 0x73: \
2761+ case 0x74: case 0x75: case 0x76: case 0x77: \
2762+ { \
2763+ /* STRB rd, [rb + imm] */ \
2764+ thumb_access_memory(store, mem_imm, rd, rb, 0, reg_imm, imm, u8); \
2765+ break; \
2766+ } \
2767+ \
2768+ case 0x78: case 0x79: case 0x7A: case 0x7B: \
2769+ case 0x7C: case 0x7D: case 0x7E: case 0x7F: \
2770+ { \
2771+ /* LDRB rd, [rb + imm] */ \
2772+ thumb_access_memory(load, mem_imm, rd, rb, 0, reg_imm, imm, u8); \
2773+ break; \
2774+ } \
2775+ \
2776+ case 0x80: case 0x81: case 0x82: case 0x83: \
2777+ case 0x84: case 0x85: case 0x86: case 0x87: \
2778+ { \
2779+ /* STRH rd, [rb + imm] */ \
2780+ thumb_access_memory(store, mem_imm, rd, rb, 0, reg_imm, \
2781+ (imm << 1), u16); \
2782+ break; \
2783+ } \
2784+ \
2785+ case 0x88: case 0x89: case 0x8A: case 0x8B: \
2786+ case 0x8C: case 0x8D: case 0x8E: case 0x8F: \
2787+ { \
2788+ /* LDRH rd, [rb + imm] */ \
2789+ thumb_access_memory(load, mem_imm, rd, rb, 0, reg_imm, (imm << 1), u16);\
2790+ break; \
2791+ } \
2792+ \
2793+ case 0x90: \
2794+ { \
2795+ /* STR r0, [sp + imm] */ \
2796+ thumb_access_memory(store, imm, 0, 13, 0, reg_imm_sp, imm, u32); \
2797+ break; \
2798+ } \
2799+ \
2800+ case 0x91: \
2801+ { \
2802+ /* STR r1, [sp + imm] */ \
2803+ thumb_access_memory(store, imm, 1, 13, 0, reg_imm_sp, imm, u32); \
2804+ break; \
2805+ } \
2806+ \
2807+ case 0x92: \
2808+ { \
2809+ /* STR r2, [sp + imm] */ \
2810+ thumb_access_memory(store, imm, 2, 13, 0, reg_imm_sp, imm, u32); \
2811+ break; \
2812+ } \
2813+ \
2814+ case 0x93: \
2815+ { \
2816+ /* STR r3, [sp + imm] */ \
2817+ thumb_access_memory(store, imm, 3, 13, 0, reg_imm_sp, imm, u32); \
2818+ break; \
2819+ } \
2820+ \
2821+ case 0x94: \
2822+ { \
2823+ /* STR r4, [sp + imm] */ \
2824+ thumb_access_memory(store, imm, 4, 13, 0, reg_imm_sp, imm, u32); \
2825+ break; \
2826+ } \
2827+ \
2828+ case 0x95: \
2829+ { \
2830+ /* STR r5, [sp + imm] */ \
2831+ thumb_access_memory(store, imm, 5, 13, 0, reg_imm_sp, imm, u32); \
2832+ break; \
2833+ } \
2834+ \
2835+ case 0x96: \
2836+ { \
2837+ /* STR r6, [sp + imm] */ \
2838+ thumb_access_memory(store, imm, 6, 13, 0, reg_imm_sp, imm, u32); \
2839+ break; \
2840+ } \
2841+ \
2842+ case 0x97: \
2843+ { \
2844+ /* STR r7, [sp + imm] */ \
2845+ thumb_access_memory(store, imm, 7, 13, 0, reg_imm_sp, imm, u32); \
2846+ break; \
2847+ } \
2848+ \
2849+ case 0x98: \
2850+ { \
2851+ /* LDR r0, [sp + imm] */ \
2852+ thumb_access_memory(load, imm, 0, 13, 0, reg_imm_sp, imm, u32); \
2853+ break; \
2854+ } \
2855+ \
2856+ case 0x99: \
2857+ { \
2858+ /* LDR r1, [sp + imm] */ \
2859+ thumb_access_memory(load, imm, 1, 13, 0, reg_imm_sp, imm, u32); \
2860+ break; \
2861+ } \
2862+ \
2863+ case 0x9A: \
2864+ { \
2865+ /* LDR r2, [sp + imm] */ \
2866+ thumb_access_memory(load, imm, 2, 13, 0, reg_imm_sp, imm, u32); \
2867+ break; \
2868+ } \
2869+ \
2870+ case 0x9B: \
2871+ { \
2872+ /* LDR r3, [sp + imm] */ \
2873+ thumb_access_memory(load, imm, 3, 13, 0, reg_imm_sp, imm, u32); \
2874+ break; \
2875+ } \
2876+ \
2877+ case 0x9C: \
2878+ { \
2879+ /* LDR r4, [sp + imm] */ \
2880+ thumb_access_memory(load, imm, 4, 13, 0, reg_imm_sp, imm, u32); \
2881+ break; \
2882+ } \
2883+ \
2884+ case 0x9D: \
2885+ { \
2886+ /* LDR r5, [sp + imm] */ \
2887+ thumb_access_memory(load, imm, 5, 13, 0, reg_imm_sp, imm, u32); \
2888+ break; \
2889+ } \
2890+ \
2891+ case 0x9E: \
2892+ { \
2893+ /* LDR r6, [sp + imm] */ \
2894+ thumb_access_memory(load, imm, 6, 13, 0, reg_imm_sp, imm, u32); \
2895+ break; \
2896+ } \
2897+ \
2898+ case 0x9F: \
2899+ { \
2900+ /* LDR r7, [sp + imm] */ \
2901+ thumb_access_memory(load, imm, 7, 13, 0, reg_imm_sp, imm, u32); \
2902+ break; \
2903+ } \
2904+ \
2905+ case 0xA0: \
2906+ { \
2907+ /* ADD r0, pc, +imm */ \
2908+ thumb_load_pc(0); \
2909+ break; \
2910+ } \
2911+ \
2912+ case 0xA1: \
2913+ { \
2914+ /* ADD r1, pc, +imm */ \
2915+ thumb_load_pc(1); \
2916+ break; \
2917+ } \
2918+ \
2919+ case 0xA2: \
2920+ { \
2921+ /* ADD r2, pc, +imm */ \
2922+ thumb_load_pc(2); \
2923+ break; \
2924+ } \
2925+ \
2926+ case 0xA3: \
2927+ { \
2928+ /* ADD r3, pc, +imm */ \
2929+ thumb_load_pc(3); \
2930+ break; \
2931+ } \
2932+ \
2933+ case 0xA4: \
2934+ { \
2935+ /* ADD r4, pc, +imm */ \
2936+ thumb_load_pc(4); \
2937+ break; \
2938+ } \
2939+ \
2940+ case 0xA5: \
2941+ { \
2942+ /* ADD r5, pc, +imm */ \
2943+ thumb_load_pc(5); \
2944+ break; \
2945+ } \
2946+ \
2947+ case 0xA6: \
2948+ { \
2949+ /* ADD r6, pc, +imm */ \
2950+ thumb_load_pc(6); \
2951+ break; \
2952+ } \
2953+ \
2954+ case 0xA7: \
2955+ { \
2956+ /* ADD r7, pc, +imm */ \
2957+ thumb_load_pc(7); \
2958+ break; \
2959+ } \
2960+ \
2961+ case 0xA8: \
2962+ { \
2963+ /* ADD r0, sp, +imm */ \
2964+ thumb_load_sp(0); \
2965+ break; \
2966+ } \
2967+ \
2968+ case 0xA9: \
2969+ { \
2970+ /* ADD r1, sp, +imm */ \
2971+ thumb_load_sp(1); \
2972+ break; \
2973+ } \
2974+ \
2975+ case 0xAA: \
2976+ { \
2977+ /* ADD r2, sp, +imm */ \
2978+ thumb_load_sp(2); \
2979+ break; \
2980+ } \
2981+ \
2982+ case 0xAB: \
2983+ { \
2984+ /* ADD r3, sp, +imm */ \
2985+ thumb_load_sp(3); \
2986+ break; \
2987+ } \
2988+ \
2989+ case 0xAC: \
2990+ { \
2991+ /* ADD r4, sp, +imm */ \
2992+ thumb_load_sp(4); \
2993+ break; \
2994+ } \
2995+ \
2996+ case 0xAD: \
2997+ { \
2998+ /* ADD r5, sp, +imm */ \
2999+ thumb_load_sp(5); \
3000+ break; \
3001+ } \
3002+ \
3003+ case 0xAE: \
3004+ { \
3005+ /* ADD r6, sp, +imm */ \
3006+ thumb_load_sp(6); \
3007+ break; \
3008+ } \
3009+ \
3010+ case 0xAF: \
3011+ { \
3012+ /* ADD r7, sp, +imm */ \
3013+ thumb_load_sp(7); \
3014+ break; \
3015+ } \
3016+ \
3017+ case 0xB0: \
3018+ { \
3019+ if((opcode >> 7) & 0x01) \
3020+ { \
3021+ /* ADD sp, -imm */ \
3022+ thumb_adjust_sp(-(imm << 2)); \
3023+ } \
3024+ else \
3025+ { \
3026+ /* ADD sp, +imm */ \
3027+ thumb_adjust_sp((imm << 2)); \
3028+ } \
3029+ break; \
3030+ } \
3031+ \
3032+ case 0xB4: \
3033+ { \
3034+ /* PUSH rlist */ \
3035+ thumb_block_memory(store, down, no, 13); \
3036+ break; \
3037+ } \
3038+ \
3039+ case 0xB5: \
3040+ { \
3041+ /* PUSH rlist, lr */ \
3042+ thumb_block_memory(store, push_lr, push_lr, 13); \
3043+ break; \
3044+ } \
3045+ \
3046+ case 0xBC: \
3047+ { \
3048+ /* POP rlist */ \
3049+ thumb_block_memory(load, no, up, 13); \
3050+ break; \
3051+ } \
3052+ \
3053+ case 0xBD: \
3054+ { \
3055+ /* POP rlist, pc */ \
3056+ thumb_block_memory(load, no, pop_pc, 13); \
3057+ break; \
3058+ } \
3059+ \
3060+ case 0xC0: \
3061+ { \
3062+ /* STMIA r0!, rlist */ \
3063+ thumb_block_memory(store, no, up, 0); \
3064+ break; \
3065+ } \
3066+ \
3067+ case 0xC1: \
3068+ { \
3069+ /* STMIA r1!, rlist */ \
3070+ thumb_block_memory(store, no, up, 1); \
3071+ break; \
3072+ } \
3073+ \
3074+ case 0xC2: \
3075+ { \
3076+ /* STMIA r2!, rlist */ \
3077+ thumb_block_memory(store, no, up, 2); \
3078+ break; \
3079+ } \
3080+ \
3081+ case 0xC3: \
3082+ { \
3083+ /* STMIA r3!, rlist */ \
3084+ thumb_block_memory(store, no, up, 3); \
3085+ break; \
3086+ } \
3087+ \
3088+ case 0xC4: \
3089+ { \
3090+ /* STMIA r4!, rlist */ \
3091+ thumb_block_memory(store, no, up, 4); \
3092+ break; \
3093+ } \
3094+ \
3095+ case 0xC5: \
3096+ { \
3097+ /* STMIA r5!, rlist */ \
3098+ thumb_block_memory(store, no, up, 5); \
3099+ break; \
3100+ } \
3101+ \
3102+ case 0xC6: \
3103+ { \
3104+ /* STMIA r6!, rlist */ \
3105+ thumb_block_memory(store, no, up, 6); \
3106+ break; \
3107+ } \
3108+ \
3109+ case 0xC7: \
3110+ { \
3111+ /* STMIA r7!, rlist */ \
3112+ thumb_block_memory(store, no, up, 7); \
3113+ break; \
3114+ } \
3115+ \
3116+ case 0xC8: \
3117+ { \
3118+ /* LDMIA r0!, rlist */ \
3119+ thumb_block_memory(load, no, up, 0); \
3120+ break; \
3121+ } \
3122+ \
3123+ case 0xC9: \
3124+ { \
3125+ /* LDMIA r1!, rlist */ \
3126+ thumb_block_memory(load, no, up, 1); \
3127+ break; \
3128+ } \
3129+ \
3130+ case 0xCA: \
3131+ { \
3132+ /* LDMIA r2!, rlist */ \
3133+ thumb_block_memory(load, no, up, 2); \
3134+ break; \
3135+ } \
3136+ \
3137+ case 0xCB: \
3138+ { \
3139+ /* LDMIA r3!, rlist */ \
3140+ thumb_block_memory(load, no, up, 3); \
3141+ break; \
3142+ } \
3143+ \
3144+ case 0xCC: \
3145+ { \
3146+ /* LDMIA r4!, rlist */ \
3147+ thumb_block_memory(load, no, up, 4); \
3148+ break; \
3149+ } \
3150+ \
3151+ case 0xCD: \
3152+ { \
3153+ /* LDMIA r5!, rlist */ \
3154+ thumb_block_memory(load, no, up, 5); \
3155+ break; \
3156+ } \
3157+ \
3158+ case 0xCE: \
3159+ { \
3160+ /* LDMIA r6!, rlist */ \
3161+ thumb_block_memory(load, no, up, 6); \
3162+ break; \
3163+ } \
3164+ \
3165+ case 0xCF: \
3166+ { \
3167+ /* LDMIA r7!, rlist */ \
3168+ thumb_block_memory(load, no, up, 7); \
3169+ break; \
3170+ } \
3171+ \
3172+ case 0xD0: \
3173+ { \
3174+ /* BEQ label */ \
3175+ thumb_conditional_branch(eq); \
3176+ break; \
3177+ } \
3178+ \
3179+ case 0xD1: \
3180+ { \
3181+ /* BNE label */ \
3182+ thumb_conditional_branch(ne); \
3183+ break; \
3184+ } \
3185+ \
3186+ case 0xD2: \
3187+ { \
3188+ /* BCS label */ \
3189+ thumb_conditional_branch(cs); \
3190+ break; \
3191+ } \
3192+ \
3193+ case 0xD3: \
3194+ { \
3195+ /* BCC label */ \
3196+ thumb_conditional_branch(cc); \
3197+ break; \
3198+ } \
3199+ \
3200+ case 0xD4: \
3201+ { \
3202+ /* BMI label */ \
3203+ thumb_conditional_branch(mi); \
3204+ break; \
3205+ } \
3206+ \
3207+ case 0xD5: \
3208+ { \
3209+ /* BPL label */ \
3210+ thumb_conditional_branch(pl); \
3211+ break; \
3212+ } \
3213+ \
3214+ case 0xD6: \
3215+ { \
3216+ /* BVS label */ \
3217+ thumb_conditional_branch(vs); \
3218+ break; \
3219+ } \
3220+ \
3221+ case 0xD7: \
3222+ { \
3223+ /* BVC label */ \
3224+ thumb_conditional_branch(vc); \
3225+ break; \
3226+ } \
3227+ \
3228+ case 0xD8: \
3229+ { \
3230+ /* BHI label */ \
3231+ thumb_conditional_branch(hi); \
3232+ break; \
3233+ } \
3234+ \
3235+ case 0xD9: \
3236+ { \
3237+ /* BLS label */ \
3238+ thumb_conditional_branch(ls); \
3239+ break; \
3240+ } \
3241+ \
3242+ case 0xDA: \
3243+ { \
3244+ /* BGE label */ \
3245+ thumb_conditional_branch(ge); \
3246+ break; \
3247+ } \
3248+ \
3249+ case 0xDB: \
3250+ { \
3251+ /* BLT label */ \
3252+ thumb_conditional_branch(lt); \
3253+ break; \
3254+ } \
3255+ \
3256+ case 0xDC: \
3257+ { \
3258+ /* BGT label */ \
3259+ thumb_conditional_branch(gt); \
3260+ break; \
3261+ } \
3262+ \
3263+ case 0xDD: \
3264+ { \
3265+ /* BLE label */ \
3266+ thumb_conditional_branch(le); \
3267+ break; \
3268+ } \
3269+ \
3270+ case 0xDF: \
3271+ { \
3272+ /* SWI comment */ \
3273+ thumb_swi(); \
3274+ break; \
3275+ } \
3276+ \
3277+ case 0xE0: case 0xE1: case 0xE2: case 0xE3: \
3278+ case 0xE4: case 0xE5: case 0xE6: case 0xE7: \
3279+ { \
3280+ /* B label */ \
3281+ thumb_b(); \
3282+ break; \
3283+ } \
3284+ \
3285+ case 0xF0: case 0xF1: case 0xF2: case 0xF3: \
3286+ case 0xF4: case 0xF5: case 0xF6: case 0xF7: \
3287+ { \
3288+ /* (low word) BL label */ \
3289+ /* This should possibly generate code when it is not followed by a \
3290+ BLH, but I don't think anyone will do that. */ \
3291+ break; \
3292+ } \
3293+ \
3294+ case 0xF8: case 0xF9: case 0xFA: case 0xFB: \
3295+ case 0xFC: case 0xFD: case 0xFE: case 0xFF: \
3296+ { \
3297+ /* (high word) BL label */ \
3298+ /* This might not be preceded by a BL low word (Golden Sun 2); if so, \
3299+ it must be handled like an indirect branch. */ \
3300+ if((last_opcode >= 0xF000) && (last_opcode < 0xF800)) \
3301+ { \
3302+ thumb_bl(); \
3303+ } \
3304+ else \
3305+ { \
3306+ thumb_blh(); \
3307+ } \
3308+ break; \
3309+ } \
3310+ } \
3311+ \
3312+ pc += 2 \
3313+
3314+
3315+#define thumb_flag_modifies_all() \
3316+ flag_status |= 0xFF \
3317+
3318+#define thumb_flag_modifies_zn() \
3319+ flag_status |= 0xCC \
3320+
3321+#define thumb_flag_modifies_znc() \
3322+ flag_status |= 0xEE \
3323+
3324+#define thumb_flag_modifies_zn_maybe_c() \
3325+ flag_status |= 0xCE \
3326+
3327+#define thumb_flag_modifies_c() \
3328+ flag_status |= 0x22 \
3329+
3330+#define thumb_flag_requires_c() \
3331+ flag_status |= 0x200 \
3332+
3333+#define thumb_flag_requires_all() \
3334+ flag_status |= 0xF00 \
3335+
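// Note (added for clarity, not in the original source): these masks follow the
// 12-bit layout documented before thumb_dead_flag_eliminate() below - bits 0:3
// are "may modify", bits 7:4 "must modify", and bits 11:8 "required as input".
// So 0xCC marks N/Z as both possibly and definitely modified, while 0x200 only
// records that the carry flag is needed as an input.
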
3336+#define thumb_flag_status() \
3337+{ \
3338+ u16 flag_status = 0; \
3339+ switch((opcode >> 8) & 0xFF) \
3340+ { \
3341+ /* left shift by imm */ \
3342+ case 0x00: case 0x01: case 0x02: case 0x03: \
3343+ case 0x04: case 0x05: case 0x06: case 0x07: \
3344+ { \
3345+ thumb_flag_modifies_zn(); \
3346+ if(((opcode >> 6) & 0x1F) != 0) \
3347+ { \
3348+ thumb_flag_modifies_c(); \
3349+ } \
3350+ break; \
3351+ } \
3352+ \
3353+ /* right shift by imm */ \
3354+ case 0x08: case 0x09: case 0x0A: case 0x0B: \
3355+ case 0x0C: case 0x0D: case 0x0E: case 0x0F: \
3356+ case 0x10: case 0x11: case 0x12: case 0x13: \
3357+ case 0x14: case 0x15: case 0x16: case 0x17: \
3358+ { \
3359+ thumb_flag_modifies_znc(); \
3360+ break; \
3361+ } \
3362+ \
3363+ /* add, subtract */ \
3364+ case 0x18: case 0x19: case 0x1A: case 0x1B: \
3365+ case 0x1C: case 0x1D: case 0x1E: case 0x1F: \
3366+ { \
3367+ thumb_flag_modifies_all(); \
3368+ break; \
3369+ } \
3370+ \
3371+ /* mov reg, imm */ \
3372+ case 0x20: case 0x21: case 0x22: case 0x23: \
3373+ case 0x24: case 0x25: case 0x26: case 0x27: \
3374+ { \
3375+ thumb_flag_modifies_zn(); \
3376+ break; \
3377+ } \
3378+ \
3379+ /* cmp reg, imm; add, subtract */ \
3380+ case 0x28: case 0x29: case 0x2A: case 0x2B: \
3381+ case 0x2C: case 0x2D: case 0x2E: case 0x2F: \
3382+ case 0x30: case 0x31: case 0x32: case 0x33: \
3383+ case 0x34: case 0x35: case 0x36: case 0x37: \
3384+ case 0x38: case 0x39: case 0x3A: case 0x3B: \
3385+ case 0x3C: case 0x3D: case 0x3E: case 0x3F: \
3386+ { \
3387+ thumb_flag_modifies_all(); \
3388+ break; \
3389+ } \
3390+ \
3391+ case 0x40: \
3392+ { \
3393+ switch((opcode >> 6) & 0x03) \
3394+ { \
3395+ case 0x00: \
3396+ { \
3397+ /* AND rd, rs */ \
3398+ thumb_flag_modifies_zn(); \
3399+ break; \
3400+ } \
3401+ \
3402+ case 0x01: \
3403+ { \
3404+ /* EOR rd, rs */ \
3405+ thumb_flag_modifies_zn(); \
3406+ break; \
3407+ } \
3408+ \
3409+ case 0x02: \
3410+ { \
3411+ /* LSL rd, rs */ \
3412+ thumb_flag_modifies_zn_maybe_c(); \
3413+ break; \
3414+ } \
3415+ \
3416+ case 0x03: \
3417+ { \
3418+ /* LSR rd, rs */ \
3419+ thumb_flag_modifies_zn_maybe_c(); \
3420+ break; \
3421+ } \
3422+ } \
3423+ break; \
3424+ } \
3425+ \
3426+ case 0x41: \
3427+ { \
3428+ switch((opcode >> 6) & 0x03) \
3429+ { \
3430+ case 0x00: \
3431+ { \
3432+ /* ASR rd, rs */ \
3433+ thumb_flag_modifies_zn_maybe_c(); \
3434+ break; \
3435+ } \
3436+ \
3437+ case 0x01: \
3438+ { \
3439+ /* ADC rd, rs */ \
3440+ thumb_flag_modifies_all(); \
3441+ thumb_flag_requires_c(); \
3442+ break; \
3443+ } \
3444+ \
3445+ case 0x02: \
3446+ { \
3447+ /* SBC rd, rs */ \
3448+ thumb_flag_modifies_all(); \
3449+ thumb_flag_requires_c(); \
3450+ break; \
3451+ } \
3452+ \
3453+ case 0x03: \
3454+ { \
3455+ /* ROR rd, rs */ \
3456+ thumb_flag_modifies_zn_maybe_c(); \
3457+ break; \
3458+ } \
3459+ } \
3460+ break; \
3461+ } \
3462+ \
3463+ case 0x42: \
3464+ { \
3465+ if((opcode >> 6) & 0x03) \
3466+ { \
3467+ /* NEG, CMP, CMN */ \
3468+ thumb_flag_modifies_all(); \
3469+ } \
3470+ else \
3471+ { \
3472+ /* TST rd, rs */ \
3473+ thumb_flag_modifies_zn(); \
3474+ } \
3475+ break; \
3476+ } \
3477+ \
3478+ /* ORR, MUL, BIC, MVN */ \
3479+ case 0x43: \
3480+ { \
3481+ thumb_flag_modifies_zn(); \
3482+ break; \
3483+ } \
3484+ \
3485+ case 0x45: \
3486+ { \
3487+ /* CMP rd, rs */ \
3488+ thumb_flag_modifies_all(); \
3489+ break; \
3490+ } \
3491+ \
3492+ /* mov might change PC (fall through if so) */ \
3493+ case 0x46: \
3494+ { \
3495+ if((opcode & 0xFF87) != 0x4687) \
3496+ { \
3497+ break; \
3498+ } \
3499+ } \
3500+ \
3501+ /* branches (can change PC) */ \
3502+ case 0x47: \
3503+ case 0xBD: \
3504+ \
3505+ case 0xD0: case 0xD1: case 0xD2: case 0xD3: \
3506+ case 0xD4: case 0xD5: case 0xD6: case 0xD7: \
3507+ case 0xD8: case 0xD9: case 0xDA: case 0xDB: \
3508+ case 0xDC: case 0xDD: case 0xDE: case 0xDF: \
3509+ case 0xE0: case 0xE1: case 0xE2: case 0xE3: \
3510+ case 0xE4: case 0xE5: case 0xE6: case 0xE7: \
3511+ \
3512+ case 0xF0: case 0xF1: case 0xF2: case 0xF3: \
3513+ case 0xF4: case 0xF5: case 0xF6: case 0xF7: \
3514+ case 0xF8: case 0xF9: case 0xFA: case 0xFB: \
3515+ case 0xFC: case 0xFD: case 0xFE: case 0xFF: \
3516+ { \
3517+ thumb_flag_requires_all(); \
3518+ break; \
3519+ } \
3520+ } \
3521+ \
3522+ block_data[block_data_position].flag_data = flag_status; \
3523+} \
3524+
3525+static u8 *ram_block_ptrs[1024 * 64];
3526+static u32 ram_block_tag_top = 0x0101;
3527+
3528+static u8 *bios_block_ptrs[1024 * 8];
3529+static u32 bios_block_tag_top = 0x0101;
3530+
3531+// These functions return a pointer to a translated block of code. If the block
3532+// doesn't exist yet it is translated; otherwise the cached pointer is returned.
3533+
3534+// type should be "arm", "thumb", or "dual." For arm or thumb the PC should
3535+// be a real PC, for dual the least significant bit will determine if it's
3536+// ARM or Thumb mode.
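// Illustrative sketch (not part of the original source): how a caller might
// use the dual variant when only the runtime target address is known. The
// address below is a made-up example; bit 0 selects Thumb mode and is
// stripped by the lookup itself.
#if 0
static u8 *example_lookup(void)
{
  u32 example_target = 0x08000131;   /* hypothetical Thumb entry point */
  /* Returns host code just past the block prologue, translating on a miss. */
  return block_lookup_address_dual(example_target);
}
#endif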
3537+
3538+#define block_lookup_address_pc_arm() \
3539+ pc &= ~0x03 \
3540+
3541+#define block_lookup_address_pc_thumb() \
3542+ pc &= ~0x01 \
3543+
3544+#define block_lookup_address_pc_dual() \
3545+ u32 thumb = pc & 0x01; \
3546+ if(thumb) \
3547+ { \
3548+ pc &= ~0x01; \
3549+ reg[REG_CPSR] |= 0x20; \
3550+ } \
3551+ else \
3552+ { \
3553+ pc = (pc + 2) & ~0x03; \
3554+ reg[REG_CPSR] &= ~0x20; \
3555+ } \
3556+
3557+#define ram_translation_region TRANSLATION_REGION_RAM
3558+#define rom_translation_region TRANSLATION_REGION_ROM
3559+#define bios_translation_region TRANSLATION_REGION_BIOS
3560+
3561+#define block_lookup_translate_arm(mem_type, smc_enable) \
3562+ translation_result \
3563+ = translate_block_arm(pc, mem_type##_translation_region, smc_enable) \
3564+
3565+#define block_lookup_translate_thumb(mem_type, smc_enable) \
3566+ translation_result \
3567+ = translate_block_thumb(pc, mem_type##_translation_region, smc_enable) \
3568+
3569+#define block_lookup_translate_dual(mem_type, smc_enable) \
3570+ if(thumb) \
3571+ { \
3572+ translation_result \
3573+ = translate_block_thumb(pc, mem_type##_translation_region, smc_enable); \
3574+ } \
3575+ else \
3576+ { \
3577+ translation_result \
3578+ = translate_block_arm(pc, mem_type##_translation_region, smc_enable); \
3579+ } \
3580+
3581+// 0x0101 is the smallest tag that can be used. 0xFFFF is marked
3582+// in the middle of blocks and used for write guarding; it doesn't
3583+// indicate a valid block either (it's okay to compile a new block
3584+// that overlaps the earlier one, although this should be relatively
3585+// uncommon)
3586+
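// Added note (not in the original source): the RAM and BIOS flush routines
// below clear the relevant tag halfwords back to zero, so after a flush the
// first lookup at a given PC takes the "translate" path again and stamps the
// location with the current *_block_tag_top value.
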
3587+#define fill_tag_arm(mem_type) \
3588+ location[0] = mem_type##_block_tag_top; \
3589+ location[1] = 0xFFFF \
3590+
3591+#define fill_tag_thumb(mem_type) \
3592+ *location = mem_type##_block_tag_top \
3593+
3594+#define fill_tag_dual(mem_type) \
3595+ if(thumb) \
3596+ { \
3597+ fill_tag_thumb(mem_type); \
3598+ } \
3599+ else \
3600+ { \
3601+ fill_tag_arm(mem_type); \
3602+ } \
3603+
3604+#define block_lookup_translate(instruction_type, mem_type, smc_enable) \
3605+ block_tag = *location; \
3606+ if((block_tag < 0x0101) || (block_tag == 0xFFFF)) \
3607+ { \
3608+ __label__ redo; \
3609+ s32 translation_result; \
3610+ \
3611+ redo: \
3612+ \
3613+ translation_recursion_level++; \
3614+ block_address = mem_type##_translation_ptr + block_prologue_size; \
3615+ mem_type##_block_ptrs[mem_type##_block_tag_top] = block_address; \
3616+ fill_tag_##instruction_type(mem_type); \
3617+ mem_type##_block_tag_top++; \
3618+ \
3619+ block_lookup_translate_##instruction_type(mem_type, smc_enable); \
3620+ translation_recursion_level--; \
3621+ \
3622+ /* If the translation failed then pass that failure on if we're in \
3623+ a recursive level, or try again if we've hit the bottom. */ \
3624+ if(translation_result == -1) \
3625+ { \
3626+ if(translation_recursion_level) \
3627+ { \
3628+ return NULL; \
3629+ } \
3630+ goto redo; \
3631+ } \
3632+ \
3633+ if(translation_recursion_level == 0) \
3634+ { \
3635+ translate_invalidate_dcache(); \
3636+ } \
3637+ } \
3638+ else \
3639+ { \
3640+ block_address = mem_type##_block_ptrs[block_tag]; \
3641+ } \
3642+
3643+static u32 translation_recursion_level = 0;
3644+static u32 translation_flush_count = 0;
3645+
3646+#define block_lookup_address_body(type) \
3647+{ \
3648+ u16 *location; \
3649+ u32 block_tag; \
3650+ u8 *block_address; \
3651+ \
3652+ /* Starting at the beginning, we allow for one translation cache flush. */ \
3653+ if(translation_recursion_level == 0) \
3654+ { \
3655+ translation_flush_count = 0; \
3656+ } \
3657+ block_lookup_address_pc_##type(); \
3658+ \
3659+ switch(pc >> 24) \
3660+ { \
3661+ case 0x0: \
3662+ { \
3663+ bios_region_read_allow(); \
3664+ location = (u16 *)(bios_rom + pc + 0x4000); \
3665+ block_lookup_translate(type, bios, 0); \
3666+ if(translation_recursion_level == 0) \
3667+ { \
3668+ bios_region_read_allow(); \
3669+ } \
3670+ break; \
3671+ } \
3672+ \
3673+ case 0x2: \
3674+ { \
3675+ location = (u16 *)(ewram + (pc & 0x7FFF) + ((pc & 0x38000) << 1)); \
3676+ block_lookup_translate(type, ram, 1); \
3677+ break; \
3678+ } \
3679+ \
3680+ case 0x3: \
3681+ { \
3682+ location = (u16 *)(iwram + (pc & 0x7FFF)); \
3683+ block_lookup_translate(type, ram, 1); \
3684+ break; \
3685+ } \
3686+ \
3687+ case 0x8: case 0x9: \
3688+ case 0xA: case 0xB: \
3689+ case 0xC: case 0xD: \
3690+ { \
3691+ u32 hash_target = ((pc * 2654435761U) >> 16) & \
3692+ (ROM_BRANCH_HASH_SIZE - 1); \
3693+ u32 *block_ptr = rom_branch_hash[hash_target]; \
3694+ u32 **block_ptr_address = rom_branch_hash + hash_target; \
3695+ \
3696+ while(block_ptr) \
3697+ { \
3698+ if(block_ptr[0] == pc) \
3699+ { \
3700+ block_address = (u8 *)(block_ptr + 2) + block_prologue_size; \
3701+ break; \
3702+ } \
3703+ block_ptr_address = (u32 **)(block_ptr + 1); \
3704+ block_ptr = (u32 *)block_ptr[1]; \
3705+ } \
3706+ \
3707+ if(block_ptr == NULL) \
3708+ { \
3709+ __label__ redo; \
3710+ s32 translation_result; \
3711+ \
3712+ redo: \
3713+ \
3714+ translation_recursion_level++; \
3715+ ((u32 *)rom_translation_ptr)[0] = pc; \
3716+ ((u32 **)rom_translation_ptr)[1] = NULL; \
3717+ *block_ptr_address = (u32 *)rom_translation_ptr; \
3718+ rom_translation_ptr += 8; \
3719+ block_address = rom_translation_ptr + block_prologue_size; \
3720+ block_lookup_translate_##type(rom, 0); \
3721+ translation_recursion_level--; \
3722+ \
3723+ /* If the translation failed then pass that failure on if we're in \
3724+ a recursive level, or try again if we've hit the bottom. */ \
3725+ if(translation_result == -1) \
3726+ { \
3727+ if(translation_recursion_level) \
3728+ { \
3729+ return NULL; \
3730+ } \
3731+ goto redo; \
3732+ } \
3733+ \
3734+ if(translation_recursion_level == 0) \
3735+ { \
3736+ translate_invalidate_dcache(); \
3737+ } \
3738+ } \
3739+ break; \
3740+ } \
3741+ \
3742+ default: \
3743+ { \
3744+ /* If we're at the bottom, it means we're actually trying to jump to an \
3745+ address that we can't handle. Otherwise, it means that the code being \
3746+ scanned has reached an address that can't be handled, which means that \
3747+ we have most likely hit an area that doesn't contain code yet (for \
3748+ instance, in RAM). If such a thing happens, return -1 and the block \
3749+ translator will naively link it (it'll be okay, since it should \
3750+ never be hit). */ \
3751+ if(translation_recursion_level == 0) \
3752+ { \
3753+ char buffer[256]; \
3754+ video_resolution_large(); \
3755+ sprintf(buffer, "bad jump %x (%x)\n", (int)pc, (int)reg[REG_PC]); \
3756+ print_string(buffer, 0xFFFF, 0x0000, 5, 5); \
3757+ sceKernelDelayThread(3000000); \
3758+ quit(); \
3759+ } \
3760+ block_address = (u8 *)(-1); \
3761+ break; \
3762+ } \
3763+ } \
3764+ \
3765+ return block_address; \
3766+} \
3767+
3768+u8 *block_lookup_address_arm(u32 pc)
3769+ block_lookup_address_body(arm);
3770+
3771+u8 *block_lookup_address_thumb(u32 pc)
3772+ block_lookup_address_body(thumb);
3773+
3774+u8 *block_lookup_address_dual(u32 pc)
3775+ block_lookup_address_body(dual);
3776+
3777+// Potential exit point: the rd field of the instruction is 0x0F (PC),
3778+// the instruction is b/bl/bx, or the instruction is ldm with PC in the
3779+// register list.
3780+// All instructions with upper 3 bits less than 100b have an rd field. The
3781+// exceptions are bx, where those bits must be 0xF anyway, multiplies,
3782+// which cannot have 0xF in the corresponding fields, and msr, which
3783+// has 0x0F there but doesn't end the block (and therefore must be
3784+// checked for specially). Because MSR and BX overlap, both are checked for.
3785+
3786+#define arm_exit_point \
3787+ (((opcode < 0x8000000) && ((opcode & 0x000F000) == 0x000F000) && \
3788+ ((opcode & 0xDB0F000) != 0x120F000)) || \
3789+ ((opcode & 0x12FFF10) == 0x12FFF10) || \
3790+ ((opcode & 0x8108000) == 0x8108000) || \
3791+ ((opcode >= 0xA000000) && (opcode < 0xF000000)) || \
3792+ ((opcode >= 0xF000000) && (!swi_hle_handle[((opcode >> 16) & 0xFF)]))) \
3793+
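// Worked example (illustrative, not in the original source): with the
// condition stripped, "ldr pc, [r0]" is 0x590F000 - below 0x8000000, its rd
// field is 0xF and it isn't msr, so the first clause flags it as an exit
// point. "mov r0, r1" (0x1A00001) matches none of the clauses, so scanning
// continues.
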
3794+#define arm_opcode_branch \
3795+ ((opcode & 0xE000000) == 0xA000000) \
3796+
3797+#define arm_opcode_swi \
3798+ ((opcode & 0xF000000) == 0xF000000) \
3799+
3800+#define arm_opcode_unconditional_branch \
3801+ (condition == 0x0E) \
3802+
3803+#define arm_load_opcode() \
3804+ opcode = ADDRESS32(pc_address_block, (block_end_pc & 0x7FFF)); \
3805+ condition = opcode >> 28; \
3806+ \
3807+ opcode &= 0xFFFFFFF; \
3808+ \
3809+ block_end_pc += 4 \
3810+
3811+#define arm_branch_target() \
3812+ branch_target = (block_end_pc + 4 + ((s32)(opcode << 8) >> 6)) \
3813+
3814+// Contiguous conditional block flags modification - it will set 0x20 in the
3815+// condition's bits if this instruction modifies flags. Taken from the CPU
3816+// switch so it'd better be right this time.
3817+
3818+#define arm_set_condition(_condition) \
3819+ block_data[block_data_position].condition = _condition; \
3820+ switch((opcode >> 20) & 0xFF) \
3821+ { \
3822+ case 0x01: \
3823+ case 0x03: \
3824+ case 0x09: \
3825+ case 0x0B: \
3826+ case 0x0D: \
3827+ case 0x0F: \
3828+ { \
3829+ if((((opcode >> 5) & 0x03) == 0) || ((opcode & 0x90) != 0x90)) \
3830+ { \
3831+ block_data[block_data_position].condition |= 0x20; \
3832+ } \
3833+ break; \
3834+ } \
3835+ \
3836+ case 0x05: \
3837+ case 0x07: \
3838+ case 0x11: \
3839+ case 0x13: \
3840+ case 0x15: case 0x16: case 0x17: \
3841+ case 0x19: \
3842+ case 0x1B: \
3843+ case 0x1D: \
3844+ case 0x1F: \
3845+ { \
3846+ if((opcode & 0x90) != 0x90) \
3847+ { \
3848+ block_data[block_data_position].condition |= 0x20; \
3849+ } \
3850+ break; \
3851+ } \
3852+ \
3853+ case 0x12: \
3854+ { \
3855+ if(((opcode & 0x90) != 0x90) && !(opcode & 0x10)) \
3856+ { \
3857+ block_data[block_data_position].condition |= 0x20; \
3858+ } \
3859+ break; \
3860+ } \
3861+ \
3862+ case 0x21: \
3863+ case 0x23: \
3864+ case 0x25: \
3865+ case 0x27: \
3866+ case 0x29: \
3867+ case 0x2B: \
3868+ case 0x2D: \
3869+ case 0x2F: \
3870+ case 0x30: case 0x31: case 0x32: case 0x33: \
3871+ case 0x34: case 0x35: case 0x36: case 0x37: \
3872+ case 0x39: \
3873+ case 0x3B: \
3874+ case 0x3D: \
3875+ case 0x3F: \
3876+ { \
3877+ block_data[block_data_position].condition |= 0x20; \
3878+ break; \
3879+ } \
3880+ } \
3881+
3882+#define arm_link_block() \
3883+ translation_target = block_lookup_address_arm(branch_target) \
3884+
3885+#define arm_instruction_width 4
3886+
3887+// For now this just sets a variable that says flags should always be
3888+// computed.
3889+
3890+#define arm_dead_flag_eliminate() \
3891+ flag_status = 0xF \
3892+
3893+
3894+// The following Thumb instructions can exit:
3895+// b, bl, bx, swi, pop {... pc}, and mov pc, ..., the latter being a hireg
3896+// op only. Rather simpler to identify than the ARM set.
3897+
3898+#define thumb_exit_point \
3899+ (((opcode >= 0xD000) && (opcode < 0xDF00)) || /* conditional branch */ \
3900+ (((opcode & 0xFF00) == 0xDF00) && (!swi_hle_handle[opcode & 0xFF])) || \
3901+ ((opcode >= 0xE000) && (opcode < 0xE800)) || /* B label */ \
3902+ ((opcode & 0xFF00) == 0x4700) || /* BX rs */ \
3903+ ((opcode & 0xFF00) == 0xBD00) || /* POP rlist, pc */ \
3904+ ((opcode & 0xFF87) == 0x4687) || /* MOV rd, rs (rd = pc) */ \
3905+ ((opcode >= 0xF800))) /* BL label */ \
3906+
3907+#define thumb_opcode_branch \
3908+ (((opcode >= 0xD000) && (opcode < 0xDF00)) || \
3909+ ((opcode >= 0xE000) && (opcode < 0xE800)) || \
3910+ (opcode >= 0xF800)) \
3911+
3912+#define thumb_opcode_swi \
3913+ ((opcode & 0xFF00) == 0xDF00) \
3914+
3915+#define thumb_opcode_unconditional_branch \
3916+ ((opcode < 0xD000) || (opcode >= 0xDF00)) \
3917+
3918+#define thumb_load_opcode() \
3919+ last_opcode = opcode; \
3920+ opcode = ADDRESS16(pc_address_block, (block_end_pc & 0x7FFF)); \
3921+ \
3922+ block_end_pc += 2 \
3923+
3924+#define thumb_branch_target() \
3925+ if(opcode < 0xDF00) \
3926+ { \
3927+ branch_target = block_end_pc + 2 + ((s32)(opcode << 24) >> 23); \
3928+ } \
3929+ else \
3930+ if(opcode < 0xE800) \
3931+ { \
3932+ branch_target = block_end_pc + 2 + ((s32)(opcode << 21) >> 20); \
3933+ } \
3934+ else \
3935+ if((last_opcode >= 0xF000) && (last_opcode < 0xF800)) \
3936+ { \
3937+ branch_target = block_end_pc + \
3938+ ((s32)(last_opcode << 21) >> 9) + ((opcode & 0x07FF) << 1); \
3939+ } \
3940+ else \
3941+ { \
3942+ goto no_direct_branch; \
3943+ } \
3944+
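// Worked example (illustrative, not in the original source): for the
// conditional branch 0xD0FE (BEQ with offset byte 0xFE = -2 halfwords),
// block_end_pc already points past the 2-byte opcode, so branch_target =
// (pc + 2) + 2 + (-4) = pc, i.e. the branch jumps back to itself.
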
3945+#define thumb_set_condition(_condition) \
3946+
3947+#define thumb_link_block() \
3948+ if(branch_target != 0x00000008) \
3949+ { \
3950+ translation_target = block_lookup_address_thumb(branch_target); \
3951+ } \
3952+ else \
3953+ { \
3954+ translation_target = block_lookup_address_arm(branch_target); \
3955+ } \
3956+
3957+#define thumb_instruction_width 2
3958+
3959+
3960+// Here's how this works: each instruction has three different sets of flag
3961+// attributes, each consisting of a 4-bit mask describing how that instruction
3962+// interacts with the 4 main flags (N/Z/C/V).
3963+// The first set, in bits 0:3, is the set of flags the instruction may
3964+// modify. After this pass this is changed to the set of flags the instruction
3965+// should modify - if the bit for the corresponding flag is not set then code
3966+// does not have to be generated to calculate the flag for that instruction.
3967+
3968+// The second set, in bits 7:4, is the set of flags that the instruction must
3969+// modify (i.e., for shifts by register values the instruction may not
3970+// always modify the C flag, and thus the C bit won't be set here).
3971+
3972+// The third set, in bits 11:8, is the set of flags that the instruction uses
3973+// in its computation, or the set of flags that will be needed after the
3974+// instruction is done. For any instructions that change the PC all of the
3975+// bits should be set because it is (for now) unknown what flags will be
3976+// needed after it arrives at its destination. Instructions that use the
3977+// carry flag as input will have it set as well.
3978+
3979+// The algorithm is a simple liveness analysis procedure: It starts at the
3980+// bottom of the instruction stream and sets a "currently needed" mask to
3981+// the flags needed mask of the current instruction. Then it moves back
3982+// an instruction, ANDs that instruction's "should generate" mask by the
3983+// "currently needed" mask, then ANDs the "currently needed" mask by
3984+// the 1's complement of the instruction's "must generate" mask, and ORs
3985+// the "currently needed" mask by the instruction's "flags needed" mask.
3986+
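// Worked example (a sketch added for illustration, not part of the original
// source): the same backward pass run by hand over a three-instruction
// stream. The flag_data values are hypothetical but follow the encoding
// described above (bits 0:3 may modify, 7:4 must modify, 11:8 needed).
#if 0
static void example_dead_flag_pass(void)
{
  u16 example_flag_data[3] =
  {
    0x00FF,  /* adds: may/must modify N/Z/C/V, needs nothing */
    0x00CC,  /* movs: may/must modify N/Z, needs nothing     */
    0x0F00   /* bx:   modifies nothing, needs everything     */
  };
  s32 position = 2;
  u32 needed_mask = example_flag_data[position] >> 8;

  position--;
  while(position >= 0)
  {
    u32 flag_status = example_flag_data[position];
    /* Keep only the flags something later still needs. */
    example_flag_data[position] = flag_status & needed_mask;
    /* Flags this instruction always produces aren't needed above it;
       flags it consumes (or needs afterwards) are. */
    needed_mask &= ~((flag_status >> 4) & 0x0F);
    needed_mask |= flag_status >> 8;
    position--;
  }
  /* Result: the movs keeps N/Z (0x0C) and the adds keeps only C/V (0x03),
     because the movs regenerates N/Z before the bx can need them. */
}
#endif
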
3987+#define thumb_dead_flag_eliminate() \
3988+{ \
3989+ u32 needed_mask; \
3990+ needed_mask = block_data[block_data_position].flag_data >> 8; \
3991+ \
3992+ block_data_position--; \
3993+ while(block_data_position >= 0) \
3994+ { \
3995+ flag_status = block_data[block_data_position].flag_data; \
3996+ block_data[block_data_position].flag_data = (flag_status & needed_mask); \
3997+ needed_mask &= ~((flag_status >> 4) & 0x0F); \
3998+ needed_mask |= flag_status >> 8; \
3999+ block_data_position--; \
4000+ } \
4001+} \
4002+
4003+#define MAX_BLOCK_SIZE 8192
4004+#define MAX_EXITS 256
4005+
4006+BLOCK_DATA_TYPE block_data[MAX_BLOCK_SIZE];
4007+BLOCK_EXIT_TYPE block_exits[MAX_EXITS];
4008+
4009+#define smc_write_arm_yes() \
4010+ if(ADDRESS32(pc_address_block, (block_end_pc & 0x7FFC) - 0x8000) == 0x0000) \
4011+ { \
4012+ ADDRESS32(pc_address_block, (block_end_pc & 0x7FFC) - 0x8000) = \
4013+ 0xFFFFFFFF; \
4014+ } \
4015+
4016+#define smc_write_thumb_yes() \
4017+ if(ADDRESS16(pc_address_block, (block_end_pc & 0x7FFe) - 0x8000) == 0x0000) \
4018+ { \
4019+ ADDRESS16(pc_address_block, (block_end_pc & 0x7FFe) - 0x8000) = 0xFFFF; \
4020+ } \
4021+
4022+#define smc_write_arm_no() \
4023+
4024+#define smc_write_thumb_no() \
4025+
4026+#define scan_block(type, smc_write_op) \
4027+{ \
4028+ __label__ block_end; \
4029+ /* Find the end of the block */ \
4030+ do \
4031+ { \
4032+ check_pc_region(block_end_pc); \
4033+ smc_write_##type##_##smc_write_op(); \
4034+ type##_load_opcode(); \
4035+ type##_flag_status(); \
4036+ \
4037+ if(type##_exit_point) \
4038+ { \
4039+ /* Branch/branch with link */ \
4040+ if(type##_opcode_branch) \
4041+ { \
4042+ __label__ no_direct_branch; \
4043+ type##_branch_target(); \
4044+ block_exits[block_exit_position].branch_target = branch_target; \
4045+ block_exit_position++; \
4046+ \
4047+ /* Give the branch target macro somewhere to bail if it turns out to \
4048+ be an indirect branch (as with a malformed Thumb bl) */ \
4049+ no_direct_branch:; \
4050+ } \
4051+ \
4052+ /* SWI branches to the BIOS; this will likely change when \
4053+ an HLE BIOS is implemented. */ \
4054+ if(type##_opcode_swi) \
4055+ { \
4056+ block_exits[block_exit_position].branch_target = 0x00000008; \
4057+ block_exit_position++; \
4058+ } \
4059+ \
4060+ type##_set_condition(condition | 0x10); \
4061+ \
4062+ /* Only unconditional branches can end the block. */ \
4063+ if(type##_opcode_unconditional_branch) \
4064+ { \
4065+ /* Check to see if any prior block exits branch after here, \
4066+ if so don't end the block. Starts from the top and works \
4067+ down because the most recent branch is most likely to \
4068+ join after the end (if/then form) */ \
4069+ for(i = block_exit_position - 2; i >= 0; i--) \
4070+ { \
4071+ if(block_exits[i].branch_target == block_end_pc) \
4072+ { \
4073+ break; \
4074+ } \
4075+ } \
4076+ \
4077+ if(i < 0) \
4078+ { \
4079+ break; \
4080+ } \
4081+ } \
4082+ \
4083+ if(block_exit_position == MAX_EXITS) \
4084+ { \
4085+ break; \
4086+ } \
4087+ } \
4088+ else \
4089+ { \
4090+ type##_set_condition(condition); \
4091+ } \
4092+ \
4093+ for(i = 0; i < translation_gate_targets; i++) \
4094+ { \
4095+ if(block_end_pc == translation_gate_target_pc[i]) \
4096+ { \
4097+ goto block_end; \
4098+ } \
4099+ } \
4100+ \
4101+ block_data[block_data_position].update_cycles = 0; \
4102+ block_data_position++; \
4103+ if((block_data_position == MAX_BLOCK_SIZE) || \
4104+ (block_end_pc == 0x3007FF0) || (block_end_pc == 0x203FFF0)) \
4105+ { \
4106+ break; \
4107+ } \
4108+ } while(1); \
4109+ \
4110+ block_end:; \
4111+} \
4112+
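// Added summary (not in the original source): scan_block() walks forward from
// block_start_pc, filling block_data[] (flag_data, condition, update_cycles)
// and block_exits[] (branch targets), and leaves block_end_pc just past the
// last instruction of the block.
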
4113+#define arm_fix_pc() \
4114+ pc &= ~0x03 \
4115+
4116+#define thumb_fix_pc() \
4117+ pc &= ~0x01 \
4118+
4119+#define translate_block_builder(type) \
4120+s32 translate_block_##type(u32 pc, TRANSLATION_REGION_TYPE \
4121+ translation_region, u32 smc_enable) \
4122+{ \
4123+ u32 opcode = 0; \
4124+ u32 last_opcode; \
4125+ u32 condition; \
4126+ u32 last_condition; \
4127+ u32 pc_region = (pc >> 15); \
4128+ u32 new_pc_region; \
4129+ u8 *pc_address_block = memory_map_read[pc_region]; \
4130+ u32 block_start_pc = pc; \
4131+ u32 block_end_pc = pc; \
4132+ u32 block_exit_position = 0; \
4133+ s32 block_data_position = 0; \
4134+ u32 external_block_exit_position = 0; \
4135+ u32 branch_target; \
4136+ u32 cycle_count = 0; \
4137+ u8 *translation_target; \
4138+ u8 *backpatch_address = NULL; \
4139+ u8 *translation_ptr = NULL; \
4140+ u8 *translation_cache_limit = NULL; \
4141+ s32 i; \
4142+ u32 flag_status; \
4143+ BLOCK_EXIT_TYPE external_block_exits[MAX_EXITS]; \
4144+ generate_block_extra_vars_##type(); \
4145+ type##_fix_pc(); \
4146+ \
4147+ if(pc_address_block == NULL) \
4148+ { \
4149+ pc_address_block = load_gamepak_page(pc_region & 0x3FF); \
4150+ } \
4151+ \
4152+ switch(translation_region) \
4153+ { \
4154+ case TRANSLATION_REGION_RAM: \
4155+ { \
4156+ if(pc >= 0x3000000) \
4157+ { \
4158+ if((pc < iwram_code_min) || (iwram_code_min == 0xFFFFFFFF)) \
4159+ { \
4160+ iwram_code_min = pc; \
4161+ } \
4162+ } \
4163+ else \
4164+ if(pc >= 0x2000000) \
4165+ { \
4166+ if((pc < ewram_code_min) || (ewram_code_min == 0xFFFFFFFF)) \
4167+ { \
4168+ ewram_code_min = pc; \
4169+ } \
4170+ } \
4171+ \
4172+ translation_ptr = ram_translation_ptr; \
4173+ translation_cache_limit = \
4174+ ram_translation_cache + RAM_TRANSLATION_CACHE_SIZE - \
4175+ TRANSLATION_CACHE_LIMIT_THRESHOLD; \
4176+ break; \
4177+ } \
4178+ \
4179+ case TRANSLATION_REGION_ROM: \
4180+ { \
4181+ translation_ptr = rom_translation_ptr; \
4182+ translation_cache_limit = \
4183+ rom_translation_cache + ROM_TRANSLATION_CACHE_SIZE - \
4184+ TRANSLATION_CACHE_LIMIT_THRESHOLD; \
4185+ break; \
4186+ } \
4187+ \
4188+ case TRANSLATION_REGION_BIOS: \
4189+ { \
4190+ translation_ptr = bios_translation_ptr; \
4191+ translation_cache_limit = bios_translation_cache + \
4192+ BIOS_TRANSLATION_CACHE_SIZE; \
4193+ break; \
4194+ } \
4195+ } \
4196+ \
4197+ generate_block_prologue(); \
4198+ \
4199+ /* This is a function because it's used a lot more than it might seem (all \
4200+ of the data processing functions can access it), and its expansion was \
4201+ massacring the compiler. */ \
4202+ \
4203+ if(smc_enable) \
4204+ { \
4205+ scan_block(type, yes); \
4206+ } \
4207+ else \
4208+ { \
4209+ scan_block(type, no); \
4210+ } \
4211+ \
4212+ for(i = 0; i < block_exit_position; i++) \
4213+ { \
4214+ branch_target = block_exits[i].branch_target; \
4215+ \
4216+ if((branch_target > block_start_pc) && (branch_target < block_end_pc)) \
4217+ { \
4218+ block_data[(branch_target - block_start_pc) / \
4219+ type##_instruction_width].update_cycles = 1; \
4220+ } \
4221+ } \
4222+ \
4223+ type##_dead_flag_eliminate(); \
4224+ \
4225+ block_exit_position = 0; \
4226+ block_data_position = 0; \
4227+ \
4228+ last_condition = 0x0E; \
4229+ \
4230+ while(pc != block_end_pc) \
4231+ { \
4232+ block_data[block_data_position].block_offset = translation_ptr; \
4233+ \
4234+ translate_##type##_instruction(); \
4235+ block_data_position++; \
4236+ \
4237+ /* If it went too far the cache needs to be flushed and the process \
4238+ restarted. Because we might already be nested several stages in, \
4239+ a simple recursive call here won't work; it has to unwind all the \
4240+ way back out to the beginning. */ \
4241+ \
4242+ if(translation_ptr > translation_cache_limit) \
4243+ { \
4244+ translation_flush_count++; \
4245+ \
4246+ switch(translation_region) \
4247+ { \
4248+ case TRANSLATION_REGION_RAM: \
4249+ { \
4250+ flush_translation_cache_ram(); \
4251+ break; \
4252+ } \
4253+ \
4254+ case TRANSLATION_REGION_ROM: \
4255+ { \
4256+ flush_translation_cache_rom(); \
4257+ break; \
4258+ } \
4259+ \
4260+ case TRANSLATION_REGION_BIOS: \
4261+ { \
4262+ flush_translation_cache_bios(); \
4263+ break; \
4264+ } \
4265+ } \
4266+ \
4267+ return -1; \
4268+ } \
4269+ \
4270+ /* If the next instruction is a block entry point, update the \
4271+ cycle counter here. */ \
4272+ if(block_data[block_data_position].update_cycles == 1) \
4273+ { \
4274+ generate_cycle_update(); \
4275+ } \
4276+ } \
4277+ \
4278+ for(i = 0; i < translation_gate_targets; i++) \
4279+ { \
4280+ if(pc == translation_gate_target_pc[i]) \
4281+ { \
4282+ generate_translation_gate(type); \
4283+ break; \
4284+ } \
4285+ } \
4286+ \
4287+ for(i = 0; i < block_exit_position; i++) \
4288+ { \
4289+ branch_target = block_exits[i].branch_target; \
4290+ \
4291+ if((branch_target >= block_start_pc) && (branch_target < block_end_pc)) \
4292+ { \
4293+ /* Internal branch, patch to recorded address */ \
4294+ translation_target = \
4295+ block_data[(branch_target - block_start_pc) / \
4296+ type##_instruction_width].block_offset; \
4297+ \
4298+ generate_branch_patch_unconditional(block_exits[i].branch_source, \
4299+ translation_target); \
4300+ } \
4301+ else \
4302+ { \
4303+ /* External branch, save for later */ \
4304+ external_block_exits[external_block_exit_position].branch_target = \
4305+ branch_target; \
4306+ external_block_exits[external_block_exit_position].branch_source = \
4307+ block_exits[i].branch_source; \
4308+ external_block_exit_position++; \
4309+ } \
4310+ } \
4311+ \
4312+ switch(translation_region) \
4313+ { \
4314+ case TRANSLATION_REGION_RAM: \
4315+ { \
4316+ if(pc >= 0x3000000) \
4317+ { \
4318+ if((pc > iwram_code_max) || (iwram_code_max == 0xFFFFFFFF)) \
4319+ { \
4320+ iwram_code_max = pc; \
4321+ } \
4322+ } \
4323+ else \
4324+ if(pc >= 0x2000000) \
4325+ { \
4326+ if((pc > ewram_code_max) || (ewram_code_max == 0xFFFFFFFF)) \
4327+ { \
4328+ ewram_code_max = pc; \
4329+ } \
4330+ } \
4331+ \
4332+ ram_translation_ptr = translation_ptr; \
4333+ break; \
4334+ } \
4335+ \
4336+ case TRANSLATION_REGION_ROM: \
4337+ { \
4338+ rom_translation_ptr = translation_ptr; \
4339+ break; \
4340+ } \
4341+ \
4342+ case TRANSLATION_REGION_BIOS: \
4343+ { \
4344+ bios_translation_ptr = translation_ptr; \
4345+ break; \
4346+ } \
4347+ } \
4348+ \
4349+ for(i = 0; i < external_block_exit_position; i++) \
4350+ { \
4351+ branch_target = external_block_exits[i].branch_target; \
4352+ type##_link_block(); \
4353+ if(translation_target == NULL) \
4354+ { \
4355+ return -1; \
4356+ } \
4357+ generate_branch_patch_unconditional( \
4358+ external_block_exits[i].branch_source, translation_target); \
4359+ } \
4360+ \
4361+ return 0; \
4362+} \
4363+
4364+translate_block_builder(arm);
4365+translate_block_builder(thumb);
4366+
4367+void flush_translation_cache_ram(void)
4368+{
4369+ invalidate_icache_region(ram_translation_cache,
4370+ (ram_translation_ptr - ram_translation_cache) + 0x100);
4371+
4372+ ram_translation_ptr = ram_translation_cache;
4373+ ram_block_tag_top = 0x0101;
4374+
4375+ if(iwram_code_min != 0xFFFFFFFF)
4376+ {
4377+ iwram_code_min &= 0x7FFF;
4378+ iwram_code_max &= 0x7FFF;
4379+ memset(iwram + iwram_code_min, 0, (iwram_code_max - iwram_code_min) + 1);
4380+ }
4381+
4382+ if(ewram_code_min != 0xFFFFFFFF)
4383+ {
4384+ u32 ewram_code_min_page;
4385+ u32 ewram_code_max_page;
4386+ u32 ewram_code_min_offset;
4387+ u32 ewram_code_max_offset;
4388+ u8 i;
4389+
4390+ ewram_code_min &= 0x3FFFF;
4391+ ewram_code_max &= 0x3FFFF;
4392+
4393+ ewram_code_min_page = ewram_code_min >> 15;
4394+ ewram_code_max_page = ewram_code_max >> 15;
4395+ ewram_code_min_offset = ewram_code_min & 0x7FFF;
4396+ ewram_code_max_offset = ewram_code_max & 0x7FFF;
4397+
4398+ if(ewram_code_min_page == ewram_code_max_page)
4399+ {
4400+ memset(ewram + (ewram_code_min_page << 16) + ewram_code_min_offset, 0,
4401+ (ewram_code_max_offset - ewram_code_min_offset) + 1);
4402+ }
4403+ else
4404+ {
4405+ memset(ewram + (ewram_code_min_page << 16) + ewram_code_min_offset, 0,
4406+ 0x8000);
4407+
4408+ for(i = ewram_code_min_page + 1; i < ewram_code_max_page; i++)
4409+ {
4410+ memset(ewram + (i << 16), 0, 0x8000);
4411+ }
4412+
4413+ memset(ewram + (ewram_code_max_page << 16), 0, ewram_code_max_offset + 1);
4414+ }
4415+ }
4416+
4417+ iwram_code_min = 0xFFFFFFFF;
4418+ iwram_code_max = 0xFFFFFFFF;
4419+ ewram_code_min = 0xFFFFFFFF;
4420+ ewram_code_max = 0xFFFFFFFF;
4421+}
4422+
4423+void flush_translation_cache_rom(void)
4424+{
4425+ invalidate_icache_region(rom_translation_cache,
4426+ rom_translation_ptr - rom_translation_cache + 0x100);
4427+
4428+ rom_translation_ptr = rom_translation_cache;
4429+ memset(rom_branch_hash, 0, sizeof(rom_branch_hash));
4430+}
4431+
4432+void flush_translation_cache_bios(void)
4433+{
4434+ invalidate_icache_region(bios_translation_cache,
4435+ bios_translation_ptr - bios_translation_cache + 0x100);
4436+
4437+ bios_block_tag_top = 0x0101;
4438+ bios_translation_ptr = bios_translation_cache;
4439+ memset(bios_rom + 0x4000, 0, 0x4000);
4440+}
4441+
4442+void dump_translation_cache(void)
4443+{
4444+ FILE *fp = fopen("ram_cache.bin", "wb");
4445+ fwrite(ram_translation_cache, ram_translation_ptr - ram_translation_cache,
4446+ 1, fp);
4447+ fclose(fp);
4448+ fp = fopen("rom_cache.bin", "wb");
4449+ fwrite(rom_translation_cache, rom_translation_ptr - rom_translation_cache,
4450+ 1, fp);
4451+ fclose(fp);
4452+ fp = fopen("bios_cache.bin", "wb");
4453+ fwrite(bios_translation_cache, bios_translation_ptr - bios_translation_cache,
4454+ 1, fp);
4455+ fclose(fp);
4456+}
4457+
4458+
4459+void set_cpu_mode(CPU_MODE_TYPE new_mode)
4460+{
4461+ u8 i;
4462+ CPU_MODE_TYPE cpu_mode = reg[CPU_MODE];
4463+
4464+ if(cpu_mode != new_mode)
4465+ {
4466+ if(new_mode == MODE_FIQ)
4467+ {
4468+ for(i = 8; i < 15; i++)
4469+ {
4470+ reg_mode[cpu_mode][i - 8] = reg[i];
4471+ }
4472+ }
4473+ else
4474+ {
4475+ reg_mode[cpu_mode][5] = reg[REG_SP];
4476+ reg_mode[cpu_mode][6] = reg[REG_LR];
4477+ }
4478+
4479+ if(cpu_mode == MODE_FIQ)
4480+ {
4481+ for(i = 8; i < 15; i++)
4482+ {
4483+ reg[i] = reg_mode[new_mode][i - 8];
4484+ }
4485+ }
4486+ else
4487+ {
4488+ reg[REG_SP] = reg_mode[new_mode][5];
4489+ reg[REG_LR] = reg_mode[new_mode][6];
4490+ }
4491+
4492+ reg[CPU_MODE] = new_mode;
4493+ }
4494+}
4495+
4496+void raise_interrupt(IRQ_TYPE irq_raised)
4497+{
4498+  // The specific IRQ must be enabled in IE, the master enable (IME) must
4499+  // be on, and IRQs must not be masked by the I bit in CPSR.
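  // (Added note, not in the original source: if any of those conditions
  // fails, the write below simply leaves the interrupt pending in REG_IF and
  // no mode switch happens here.)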
4500+ io_registers[REG_IF] |= irq_raised;
4501+
4502+ if((io_registers[REG_IE] & io_registers[REG_IF]) && io_registers[REG_IME] &&
4503+ ((reg[REG_CPSR] & 0x80) == 0))
4504+ {
4505+ bios_read_protect = 0xe55ec002;
4506+
4507+ // Interrupt handler in BIOS
4508+ reg_mode[MODE_IRQ][6] = reg[REG_PC] + 4;
4509+ spsr[MODE_IRQ] = reg[REG_CPSR];
4510+ reg[REG_CPSR] = (reg[REG_CPSR] & ~0xFF) | 0xD2;
4511+ reg[REG_PC] = 0x00000018;
4512+
4513+ bios_region_read_allow();
4514+
4515+ set_cpu_mode(MODE_IRQ);
4516+ reg[CPU_HALT_STATE] = CPU_ACTIVE;
4517+ reg[CHANGED_PC_STATUS] = 1;
4518+ }
4519+}
4520+
4521+void init_cpu(void)
4522+{
4523+ u8 i;
4524+
4525+ for(i = 0; i < 16; i++)
4526+ {
4527+ reg[i] = 0;
4528+ }
4529+
4530+ reg[REG_SP] = 0x03007F00;
4531+ reg[REG_PC] = 0x08000000;
4532+ reg[REG_CPSR] = 0x0000001F;
4533+ reg[CPU_HALT_STATE] = CPU_ACTIVE;
4534+ reg[CPU_MODE] = MODE_USER;
4535+ reg[CHANGED_PC_STATUS] = 0;
4536+
4537+ reg_mode[MODE_USER][5] = 0x03007F00;
4538+ reg_mode[MODE_IRQ][5] = 0x03007FA0;
4539+ reg_mode[MODE_FIQ][5] = 0x03007FA0;
4540+ reg_mode[MODE_SUPERVISOR][5] = 0x03007FE0;
4541+}
4542+
4543+
4544+#define cpu_savestate_body(type) \
4545+{ \
4546+ FILE_##type(savestate_file, reg, 0x100); \
4547+ FILE_##type##_ARRAY(savestate_file, spsr); \
4548+ FILE_##type##_ARRAY(savestate_file, reg_mode); \
4549+} \
4550+
4551+void cpu_read_savestate(FILE_TAG_TYPE savestate_file)
4552+ cpu_savestate_body(READ);
4553+
4554+void cpu_write_mem_savestate(FILE_TAG_TYPE savestate_file)
4555+ cpu_savestate_body(WRITE_MEM);
4556+
4557+
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
--- trunk/gpsp-kai-test/src/mips_emit_new.h (nonexistent)
+++ trunk/gpsp-kai-test/src/mips_emit_new.h (revision 378)
@@ -0,0 +1,2702 @@
1+/* unofficial gameplaySP kai
2+ *
3+ * Copyright (C) 2006 Exophase <exophase@gmail.com>
4+ * Copyright (C) 2007 takka <takka@tfact.net>
5+ *
6+ * This program is free software; you can redistribute it and/or
7+ * modify it under the terms of the GNU General Public License as
8+ * published by the Free Software Foundation; either version 2 of
9+ * the License, or (at your option) any later version.
10+ *
11+ * This program is distributed in the hope that it will be useful,
12+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14+ * General Public License for more details.
15+ *
16+ * You should have received a copy of the GNU General Public License
17+ * along with this program; if not, write to the Free Software
18+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19+ */
20+
21+#ifndef MIPS_EMIT_H
22+#define MIPS_EMIT_H
23+
24+
25+u32 mips_update_gba(u32 pc);
26+
27+static u8 cycle_multiply(u32 _rs);
28+
29+// Although these are declared as functions, don't call them as
30+// such (jump to them instead)
31+void mips_indirect_branch_arm(u32 address);
32+void mips_indirect_branch_thumb(u32 address);
33+void mips_indirect_branch_dual(u32 address);
34+
35+u32 execute_read_cpsr(void);
36+u32 execute_read_spsr(void);
37+void execute_swi(u32 pc);
38+
39+u32 execute_spsr_restore(u32 address);
40+void execute_store_cpsr(u32 new_cpsr, u32 store_mask);
41+void execute_store_spsr(u32 new_spsr, u32 store_mask);
42+
43+u32 execute_spsr_restore_body(u32 address);
44+u32 execute_store_cpsr_body(u32 _cpsr, u32 store_mask, u32 address);
45+
46+u32 execute_lsl_flags_reg(u32 value, u32 shift);
47+u32 execute_lsr_flags_reg(u32 value, u32 shift);
48+u32 execute_asr_flags_reg(u32 value, u32 shift);
49+u32 execute_ror_flags_reg(u32 value, u32 shift);
50+
51+void execute_aligned_store32(u32 address, u32 value);
52+u32 execute_aligned_load32(u32 address);
53+
54+// void reg_check();
55+
56+typedef enum
57+{
58+ mips_reg_zero, // 0
59+ mips_reg_at, // 1
60+ mips_reg_v0, // 2
61+ mips_reg_v1, // 3
62+ mips_reg_a0, // 4
63+ mips_reg_a1, // 5
64+ mips_reg_a2, // 6
65+ mips_reg_a3, // 7
66+ mips_reg_t0, // 8
67+ mips_reg_t1, // 9
68+ mips_reg_t2, // 10
69+ mips_reg_t3, // 11
70+ mips_reg_t4, // 12
71+ mips_reg_t5, // 13
72+ mips_reg_t6, // 14
73+ mips_reg_t7, // 15
74+ mips_reg_s0, // 16
75+ mips_reg_s1, // 17
76+ mips_reg_s2, // 18
77+ mips_reg_s3, // 19
78+ mips_reg_s4, // 20
79+ mips_reg_s5, // 21
80+ mips_reg_s6, // 22
81+ mips_reg_s7, // 23
82+ mips_reg_t8, // 24
83+ mips_reg_t9, // 25
84+ mips_reg_k0, // 26
85+ mips_reg_k1, // 27
86+ mips_reg_gp, // 28
87+ mips_reg_sp, // 29
88+ mips_reg_fp, // 30
89+ mips_reg_ra // 31
90+} MIPS_REG_NUMBER;
91+
92+typedef enum
93+{
94+ mips_special_sll = 0x00,
95+ mips_special_srl = 0x02,
96+ mips_special_sra = 0x03,
97+ mips_special_sllv = 0x04,
98+ mips_special_srlv = 0x06,
99+ mips_special_srav = 0x07,
100+ mips_special_jr = 0x08,
101+ mips_special_jalr = 0x09,
102+ mips_special_movz = 0x0A,
103+ mips_special_movn = 0x0B,
104+ mips_special_mfhi = 0x10,
105+ mips_special_mthi = 0x11,
106+ mips_special_mflo = 0x12,
107+ mips_special_mtlo = 0x13,
108+ mips_special_mult = 0x18,
109+ mips_special_multu = 0x19,
110+ mips_special_div = 0x1A,
111+ mips_special_divu = 0x1B,
112+ mips_special_madd = 0x1C,
113+ mips_special_maddu = 0x1D,
114+ mips_special_add = 0x20,
115+ mips_special_addu = 0x21,
116+ mips_special_sub = 0x22,
117+ mips_special_subu = 0x23,
118+ mips_special_and = 0x24,
119+ mips_special_or = 0x25,
120+ mips_special_xor = 0x26,
121+ mips_special_nor = 0x27,
122+ mips_special_slt = 0x2A,
123+ mips_special_sltu = 0x2B
124+} MIPS_FUNCTION_SPECIAL;
125+
126+typedef enum
127+{
128+ mips_special3_ext = 0x00,
129+ mips_special3_ins = 0x04,
130+ mips_special3_bshfl = 0x20
131+} MIPS_FUNCTION_SPECIAL3;
132+
133+typedef enum
134+{
135+ mips_regimm_bltz = 0x00,
136+ mips_regimm_bltzal = 0x10
137+} MIPS_FUNCTION_REGIMM;
138+
139+typedef enum
140+{
141+ mips_opcode_special = 0x00,
142+ mips_opcode_regimm = 0x01,
143+ mips_opcode_j = 0x02,
144+ mips_opcode_jal = 0x03,
145+ mips_opcode_beq = 0x04,
146+ mips_opcode_bne = 0x05,
147+ mips_opcode_blez = 0x06,
148+ mips_opcode_bgtz = 0x07,
149+ mips_opcode_addi = 0x08,
150+ mips_opcode_addiu = 0x09,
151+ mips_opcode_slti = 0x0A,
152+ mips_opcode_sltiu = 0x0B,
153+ mips_opcode_andi = 0x0C,
154+ mips_opcode_ori = 0x0D,
155+ mips_opcode_xori = 0x0E,
156+ mips_opcode_lui = 0x0F,
157+ mips_opcode_llo = 0x18,
158+ mips_opcode_lhi = 0x19,
159+ mips_opcode_trap = 0x1A,
160+ mips_opcode_special2 = 0x1C,
161+ mips_opcode_special3 = 0x1F,
162+ mips_opcode_lb = 0x20,
163+ mips_opcode_lh = 0x21,
164+ mips_opcode_lw = 0x23,
165+ mips_opcode_lbu = 0x24,
166+ mips_opcode_lhu = 0x25,
167+ mips_opcode_sb = 0x28,
168+ mips_opcode_sh = 0x29,
169+ mips_opcode_sw = 0x2B,
170+} MIPS_OPCODE;
171+
172+#define mips_emit_reg(opcode, rs, rt, rd, shift, function) \
173+ *((u32 *)translation_ptr) = (mips_opcode_##opcode << 26) | \
174+ (rs << 21) | (rt << 16) | (rd << 11) | (shift << 6) | function; \
175+ translation_ptr += 4 \
176+
177+#define mips_emit_special(function, rs, rt, rd, shift) \
178+ *((u32 *)translation_ptr) = (mips_opcode_special << 26) | \
179+ (rs << 21) | (rt << 16) | (rd << 11) | (shift << 6) | \
180+ mips_special_##function; \
181+ translation_ptr += 4 \
182+
183+#define mips_emit_special3(function, rs, rt, imm_a, imm_b) \
184+ *((u32 *)translation_ptr) = (mips_opcode_special3 << 26) | \
185+ (rs << 21) | (rt << 16) | (imm_a << 11) | (imm_b << 6) | \
186+ mips_special3_##function; \
187+ translation_ptr += 4 \
188+
189+#define mips_emit_imm(opcode, rs, rt, immediate) \
190+ *((u32 *)translation_ptr) = (mips_opcode_##opcode << 26) | \
191+ (rs << 21) | (rt << 16) | (immediate & 0xFFFF); \
192+ translation_ptr += 4 \
193+
194+#define mips_emit_regimm(function, rs, immediate) \
195+ *((u32 *)translation_ptr) = (mips_opcode_regimm << 26) | \
196+ (rs << 21) | (mips_regimm_##function << 16) | (immediate & 0xFFFF); \
197+ translation_ptr += 4 \
198+
199+#define mips_emit_jump(opcode, offset) \
200+ *((u32 *)translation_ptr) = (mips_opcode_##opcode << 26) | \
201+ (offset & 0x3FFFFFF); \
202+ translation_ptr += 4 \
203+
204+#define mips_relative_offset(source, offset) \
205+ (((u32)offset - ((u32)source + 4)) >> 2) \
206+
207+#define mips_absolute_offset(offset) \
208+ ((u32)offset >> 2) \
209+
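+// For illustration: each template above packs one 32-bit MIPS instruction
+// word and advances translation_ptr by 4. E.g. mips_emit_special(addu, 9, 10, 8, 0)
+// stores (0x00 << 26) | (9 << 21) | (10 << 16) | (8 << 11) | (0 << 6) | 0x21
+// = 0x012A4021, which is "addu $t0, $t1, $t2".
+// mips_relative_offset() measures in instruction words from the slot after the
+// branch; mips_absolute_offset() is the 26-bit word index used by j/jal, so a
+// jump target has to live in the same 256MB segment as the jump itself.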
210+// ADDU rd, rs, rt
211+#define mips_emit_addu(rd, rs, rt) \
212+ mips_emit_special(addu, rs, rt, rd, 0) \
213+
214+#define mips_emit_subu(rd, rs, rt) \
215+ mips_emit_special(subu, rs, rt, rd, 0) \
216+
217+#define mips_emit_xor(rd, rs, rt) \
218+ mips_emit_special(xor, rs, rt, rd, 0) \
219+
220+#define mips_emit_add(rd, rs, rt) \
221+ mips_emit_special(add, rs, rt, rd, 0) \
222+
223+#define mips_emit_sub(rd, rs, rt) \
224+ mips_emit_special(sub, rs, rt, rd, 0) \
225+
226+#define mips_emit_and(rd, rs, rt) \
227+ mips_emit_special(and, rs, rt, rd, 0) \
228+
229+#define mips_emit_or(rd, rs, rt) \
230+ mips_emit_special(or, rs, rt, rd, 0) \
231+
232+#define mips_emit_nor(rd, rs, rt) \
233+ mips_emit_special(nor, rs, rt, rd, 0) \
234+
235+#define mips_emit_slt(rd, rs, rt) \
236+ mips_emit_special(slt, rs, rt, rd, 0) \
237+
238+#define mips_emit_sltu(rd, rs, rt) \
239+ mips_emit_special(sltu, rs, rt, rd, 0) \
240+
241+#define mips_emit_sllv(rd, rt, rs) \
242+ mips_emit_special(sllv, rs, rt, rd, 0) \
243+
244+#define mips_emit_srlv(rd, rt, rs) \
245+ mips_emit_special(srlv, rs, rt, rd, 0) \
246+
247+#define mips_emit_srav(rd, rt, rs) \
248+ mips_emit_special(srav, rs, rt, rd, 0) \
249+
250+#define mips_emit_rotrv(rd, rt, rs) \
251+ mips_emit_special(srlv, rs, rt, rd, 1) \
252+
253+#define mips_emit_sll(rd, rt, shift) \
254+ mips_emit_special(sll, 0, rt, rd, shift) \
255+
256+#define mips_emit_srl(rd, rt, shift) \
257+ mips_emit_special(srl, 0, rt, rd, shift) \
258+
259+#define mips_emit_sra(rd, rt, shift) \
260+ mips_emit_special(sra, 0, rt, rd, shift) \
261+
262+#define mips_emit_rotr(rd, rt, shift) \
263+ mips_emit_special(srl, 1, rt, rd, shift) \
264+
265+#define mips_emit_mfhi(rd) \
266+ mips_emit_special(mfhi, 0, 0, rd, 0) \
267+
268+#define mips_emit_mflo(rd) \
269+ mips_emit_special(mflo, 0, 0, rd, 0) \
270+
271+#define mips_emit_mthi(rs) \
272+ mips_emit_special(mthi, rs, 0, 0, 0) \
273+
274+#define mips_emit_mtlo(rs) \
275+ mips_emit_special(mtlo, rs, 0, 0, 0) \
276+
277+#define mips_emit_mult(rs, rt) \
278+ mips_emit_special(mult, rs, rt, 0, 0) \
279+
280+#define mips_emit_multu(rs, rt) \
281+ mips_emit_special(multu, rs, rt, 0, 0) \
282+
283+#define mips_emit_div(rs, rt) \
284+ mips_emit_special(div, rs, rt, 0, 0) \
285+
286+#define mips_emit_divu(rs, rt) \
287+ mips_emit_special(divu, rs, rt, 0, 0) \
288+
289+#define mips_emit_madd(rs, rt) \
290+ mips_emit_special(madd, rs, rt, 0, 0) \
291+
292+#define mips_emit_maddu(rs, rt) \
293+ mips_emit_special(maddu, rs, rt, 0, 0) \
294+
295+#define mips_emit_movn(rd, rs, rt) \
296+ mips_emit_special(movn, rs, rt, rd, 0) \
297+
298+#define mips_emit_movz(rd, rs, rt) \
299+ mips_emit_special(movz, rs, rt, rd, 0) \
300+
301+#define mips_emit_lb(rt, rs, offset) \
302+ mips_emit_imm(lb, rs, rt, offset) \
303+
304+#define mips_emit_lbu(rt, rs, offset) \
305+ mips_emit_imm(lbu, rs, rt, offset) \
306+
307+#define mips_emit_lh(rt, rs, offset) \
308+ mips_emit_imm(lh, rs, rt, offset) \
309+
310+#define mips_emit_lhu(rt, rs, offset) \
311+ mips_emit_imm(lhu, rs, rt, offset) \
312+
313+#define mips_emit_lw(rt, rs, offset) \
314+ mips_emit_imm(lw, rs, rt, offset) \
315+
316+#define mips_emit_sb(rt, rs, offset) \
317+ mips_emit_imm(sb, rs, rt, offset) \
318+
319+#define mips_emit_sh(rt, rs, offset) \
320+ mips_emit_imm(sh, rs, rt, offset) \
321+
322+#define mips_emit_sw(rt, rs, offset) \
323+ mips_emit_imm(sw, rs, rt, offset) \
324+
325+#define mips_emit_lui(rt, imm) \
326+ mips_emit_imm(lui, 0, rt, imm) \
327+
328+#define mips_emit_addiu(rt, rs, imm) \
329+ mips_emit_imm(addiu, rs, rt, imm) \
330+
331+#define mips_emit_xori(rt, rs, imm) \
332+ mips_emit_imm(xori, rs, rt, imm) \
333+
334+#define mips_emit_ori(rt, rs, imm) \
335+ mips_emit_imm(ori, rs, rt, imm) \
336+
337+#define mips_emit_andi(rt, rs, imm) \
338+ mips_emit_imm(andi, rs, rt, imm) \
339+
340+#define mips_emit_slti(rt, rs, imm) \
341+ mips_emit_imm(slti, rs, rt, imm) \
342+
343+#define mips_emit_sltiu(rt, rs, imm) \
344+ mips_emit_imm(sltiu, rs, rt, imm) \
345+
346+#define mips_emit_ext(rt, rs, pos, size) \
347+ mips_emit_special3(ext, rs, rt, (size - 1), pos) \
348+
349+#define mips_emit_ins(rt, rs, pos, size) \
350+ mips_emit_special3(ins, rs, rt, (pos + size - 1), pos) \
351+
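+// Rough reading of the two bitfield ops, with the operand order used above:
+//   ext rt, rs, pos, size  ->  rt = (rs >> pos) & ((1 << size) - 1)
+//   ins rt, rs, pos, size  ->  rt[pos + size - 1 : pos] = rs[size - 1 : 0]
+// The encodings put size-1 (ext) or pos+size-1 (ins) in the rd field and pos
+// in the shift-amount field, which is exactly what the macros compute.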
352+// Breaks down if the backpatch offset does not fit in 16 bits; take care
353+// when using it (should be okay if limited to conditional instructions)

354+
355+#define mips_emit_b_filler(type, rs, rt, writeback_location) \
356+ (writeback_location) = translation_ptr; \
357+ mips_emit_imm(type, rs, rt, 0) \
358+
359+// The backpatch code for this has to be handled differently than the above
360+
361+#define mips_emit_j_filler(writeback_location) \
362+ (writeback_location) = translation_ptr; \
363+ mips_emit_jump(j, 0) \
364+
365+#define mips_emit_b(type, rs, rt, offset) \
366+ mips_emit_imm(type, rs, rt, offset) \
367+
368+#define mips_emit_j(offset) \
369+ mips_emit_jump(j, offset) \
370+
371+#define mips_emit_jal(offset) \
372+ mips_emit_jump(jal, offset) \
373+
374+#define mips_emit_jr(rs) \
375+ mips_emit_special(jr, rs, 0, 0, 0) \
376+
377+#define mips_emit_bltzal(rs, offset) \
378+ mips_emit_regimm(bltzal, rs, offset) \
379+
380+#define mips_emit_nop() \
381+ mips_emit_sll(reg_zero, reg_zero, 0) \
382+
383+#define reg_base mips_reg_s0
384+#define reg_cycles mips_reg_s1
385+#define reg_a0 mips_reg_a0
386+#define reg_a1 mips_reg_a1
387+#define reg_a2 mips_reg_a2
388+#define reg_rv mips_reg_v0
389+#define reg_pc mips_reg_s3
390+#define reg_temp mips_reg_at
391+#define reg_zero mips_reg_zero
392+
393+#define reg_n_cache mips_reg_s4
394+#define reg_z_cache mips_reg_s5
395+#define reg_c_cache mips_reg_s6
396+#define reg_v_cache mips_reg_s7
397+
398+#define reg_r0 mips_reg_v1
399+#define reg_r1 mips_reg_a3
400+#define reg_r2 mips_reg_t0
401+#define reg_r3 mips_reg_t1
402+#define reg_r4 mips_reg_t2
403+#define reg_r5 mips_reg_t3
404+#define reg_r6 mips_reg_t4
405+#define reg_r7 mips_reg_t5
406+#define reg_r8 mips_reg_t6
407+#define reg_r9 mips_reg_t7
408+#define reg_r10 mips_reg_s2
409+#define reg_r11 mips_reg_t8
410+#define reg_r12 mips_reg_t9
411+#define reg_r13 mips_reg_gp
412+#define reg_r14 mips_reg_fp
413+
414+// Writing to r15 goes straight to a0, to be chained with other ops
415+
416+u32 arm_to_mips_reg[] =
417+{
418+ reg_r0,
419+ reg_r1,
420+ reg_r2,
421+ reg_r3,
422+ reg_r4,
423+ reg_r5,
424+ reg_r6,
425+ reg_r7,
426+ reg_r8,
427+ reg_r9,
428+ reg_r10,
429+ reg_r11,
430+ reg_r12,
431+ reg_r13,
432+ reg_r14,
433+ reg_a0,
434+ reg_a1,
435+ reg_a2,
436+ reg_temp
437+};
438+
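+// Note on the table above: ARM r0-r14 are pinned to MIPS registers for the
+// whole run; entries 15-17 (arm_reg_a0/a1/a2) and 18 (arm_reg_temp) are
+// scratch slots in $a0-$a2/$at, not a stored ARM r15. Reads of the PC are
+// instead synthesized with generate_load_pc() relative to reg_pc ($s3),
+// which holds the stored_pc that the block prologue loaded.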
439+#define arm_reg_a0 15
440+#define arm_reg_a1 16
441+#define arm_reg_a2 17
442+#define arm_reg_temp 18
443+
444+#define generate_load_reg(ireg, reg_index) \
445+ mips_emit_addu(ireg, arm_to_mips_reg[reg_index], reg_zero) \
446+
447+#define generate_load_imm(ireg, imm) \
448+ if(((s32)imm >= -32768) && ((s32)imm <= 32767)) \
449+ { \
450+ mips_emit_addiu(ireg, reg_zero, imm); \
451+ } \
452+ else \
453+ { \
454+ if(((u32)imm >> 16) == 0x0000) \
455+ { \
456+ mips_emit_ori(ireg, reg_zero, imm); \
457+ } \
458+ else \
459+ { \
460+ mips_emit_lui(ireg, imm >> 16); \
461+ \
462+ if(((u32)imm & 0x0000FFFF) != 0x00000000) \
463+ { \
464+ mips_emit_ori(ireg, ireg, imm & 0xFFFF); \
465+ } \
466+ } \
467+ } \
468+
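+// A few worked cases of generate_load_imm (values chosen for illustration):
+//   0x00001234 -> addiu ireg, $0, 0x1234       (fits signed 16 bits)
+//   0xFFFF8000 -> addiu ireg, $0, -0x8000      (sign extension does the rest)
+//   0x0000ABCD -> ori   ireg, $0, 0xABCD       (fits unsigned 16 bits)
+//   0x08012345 -> lui   ireg, 0x0801
+//                 ori   ireg, ireg, 0x2345     (general 32-bit case)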
469+#define generate_load_pc(ireg, new_pc) \
470+{ \
471+ s32 pc_delta = new_pc - stored_pc; \
472+ if((pc_delta >= -32768) && (pc_delta <= 32767)) \
473+ { \
474+ mips_emit_addiu(ireg, reg_pc, pc_delta); \
475+ } \
476+ else \
477+ { \
478+ generate_load_imm(ireg, new_pc); \
479+ } \
480+} \
481+
482+#define generate_store_reg(ireg, reg_index) \
483+ mips_emit_addu(arm_to_mips_reg[reg_index], ireg, reg_zero) \
484+
485+#define generate_shift_left(ireg, imm) \
486+ mips_emit_sll(ireg, ireg, imm) \
487+
488+#define generate_shift_right(ireg, imm) \
489+ mips_emit_srl(ireg, ireg, imm) \
490+
491+#define generate_shift_right_arithmetic(ireg, imm) \
492+ mips_emit_sra(ireg, ireg, imm) \
493+
494+#define generate_rotate_right(ireg, imm) \
495+ mips_emit_rotr(ireg, ireg, imm) \
496+
497+#define generate_add(ireg_dest, ireg_src) \
498+ mips_emit_addu(ireg_dest, ireg_dest, ireg_src) \
499+
500+#define generate_sub(ireg_dest, ireg_src) \
501+ mips_emit_subu(ireg_dest, ireg_dest, ireg_src) \
502+
503+#define generate_or(ireg_dest, ireg_src) \
504+ mips_emit_or(ireg_dest, ireg_dest, ireg_src) \
505+
506+#define generate_xor(ireg_dest, ireg_src) \
507+ mips_emit_xor(ireg_dest, ireg_dest, ireg_src) \
508+
509+#define generate_alu_imm(imm_type, reg_type, ireg_dest, ireg_src, imm) \
510+ if(((s32)imm >= -32768) && ((s32)imm <= 32767)) \
511+ { \
512+ mips_emit_##imm_type(ireg_dest, ireg_src, imm); \
513+ } \
514+ else \
515+ { \
516+ generate_load_imm(reg_temp, imm); \
517+ mips_emit_##reg_type(ireg_dest, ireg_src, reg_temp); \
518+ } \
519+
520+#define generate_alu_immu(imm_type, reg_type, ireg_dest, ireg_src, imm) \
521+ if(((u32)imm >= 0) && ((u32)imm <= 65535)) \
522+ { \
523+ mips_emit_##imm_type(ireg_dest, ireg_src, imm); \
524+ } \
525+ else \
526+ { \
527+ generate_load_imm(reg_temp, imm); \
528+ mips_emit_##reg_type(ireg_dest, ireg_src, reg_temp); \
529+ } \
530+
531+#define generate_add_imm(ireg, imm) \
532+ generate_alu_imm(addiu, add, ireg, ireg, imm) \
533+
534+#define generate_sub_imm(ireg, imm) \
535+ generate_alu_imm(addiu, add, ireg, ireg, -imm) \
536+
537+#define generate_xor_imm(ireg, imm) \
538+ generate_alu_immu(xori, xor, ireg, ireg, imm) \
539+
540+#define generate_add_reg_reg_imm(ireg_dest, ireg_src, imm) \
541+ generate_alu_imm(addiu, add, ireg_dest, ireg_src, imm) \
542+
543+#define generate_and_imm(ireg, imm) \
544+ generate_alu_immu(andi, and, ireg, ireg, imm) \
545+
546+#define generate_mov(ireg_dest, ireg_src) \
547+ mips_emit_addu(ireg_dest, ireg_src, reg_zero) \
548+
549+#define generate_multiply_s64() \
550+ mips_emit_mult(arm_to_mips_reg[rm], arm_to_mips_reg[rs]) \
551+
552+#define generate_multiply_u64() \
553+ mips_emit_multu(arm_to_mips_reg[rm], arm_to_mips_reg[rs]) \
554+
555+#define generate_multiply_s64_add() \
556+ mips_emit_madd(arm_to_mips_reg[rm], arm_to_mips_reg[rs]) \
557+
558+#define generate_multiply_u64_add() \
559+ mips_emit_maddu(arm_to_mips_reg[rm], arm_to_mips_reg[rs]) \
560+
561+#define generate_function_call(function_location) \
562+ mips_emit_jal(mips_absolute_offset(function_location)); \
563+ mips_emit_nop() \
564+
565+#define generate_function_call_swap_delay(function_location) \
566+{ \
567+ u32 delay_instruction = ADDRESS32(translation_ptr, -4); \
568+ translation_ptr -= 4; \
569+ mips_emit_jal(mips_absolute_offset(function_location)); \
570+ ADDRESS32(translation_ptr, 0) = delay_instruction; \
571+ translation_ptr += 4; \
572+} \
573+
574+#define generate_swap_delay() \
575+{ \
576+ u32 delay_instruction = ADDRESS32(translation_ptr, -8); \
577+ u32 branch_instruction = ADDRESS32(translation_ptr, -4); \
578+ branch_instruction = (branch_instruction & 0xFFFF0000) | \
579+ (((branch_instruction & 0x0000FFFF) + 1) & 0x0000FFFF); \
580+ ADDRESS32(translation_ptr, -8) = branch_instruction; \
581+ ADDRESS32(translation_ptr, -4) = delay_instruction; \
582+} \
583+
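+// What the two helpers above do, roughly: MIPS executes one instruction in
+// the branch delay slot, so generate_function_call_swap_delay() pulls the
+// most recently emitted instruction down into the jal's delay slot, and
+// generate_swap_delay() swaps the last two emitted words so the branch comes
+// first and its former predecessor becomes the delay slot; the branch offset
+// is bumped by one word because the branch itself moved one slot earlier.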
584+#define generate_cycle_update() \
585+ if(cycle_count != 0) \
586+ { \
587+ mips_emit_addiu(reg_cycles, reg_cycles, -cycle_count); \
588+ cycle_count = 0; \
589+ } \
590+
591+#define generate_cycle_update_force() \
592+ mips_emit_addiu(reg_cycles, reg_cycles, -cycle_count); \
593+ cycle_count = 0 \
594+
595+#define generate_branch_patch_conditional(dest, offset) \
596+ *((u16 *)(dest)) = mips_relative_offset(dest, offset) \
597+
598+#define generate_branch_patch_unconditional(dest, offset) \
599+ *((u32 *)(dest)) = (mips_opcode_j << 26) | \
600+ ((mips_absolute_offset(offset)) & 0x3FFFFFF) \
601+
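+// Backpatching sketch: the *_filler macros record translation_ptr and emit a
+// branch/jump with a zero offset. Once the real target is known,
+// generate_branch_patch_conditional() rewrites only the low 16-bit offset
+// halfword (the little-endian layout puts it first), which is where the
+// 16-bit limit noted earlier comes from, while
+// generate_branch_patch_unconditional() rewrites the whole word as a j to an
+// absolute word address inside the translation cache.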
602+#define generate_branch_no_cycle_update(writeback_location, new_pc) \
603+ { \
604+ u8 i; \
605+ u8 flag = 0; \
606+ \
607+ for( i = 0; i < idle_loop_targets; i++) \
608+ { \
609+ if(pc == idle_loop_target_pc[i]) \
610+ { \
611+ flag = 1; \
612+ } \
613+ } \
614+ \
615+ if(flag == 1) \
616+ { \
617+ generate_load_pc(reg_a0, new_pc); \
618+ generate_function_call_swap_delay(mips_update_gba); \
619+ mips_emit_j_filler(writeback_location); \
620+ mips_emit_nop(); \
621+ } \
622+ else \
623+ { \
624+ generate_load_pc(reg_a0, new_pc); \
625+ mips_emit_bltzal(reg_cycles, \
626+ mips_relative_offset(translation_ptr, update_trampoline)); \
627+ generate_swap_delay(); \
628+ mips_emit_j_filler(writeback_location); \
629+ mips_emit_nop(); \
630+ } \
631+ } \
632+
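+// Sketch of the exit path above: reg_cycles counts down, and the bltzal only
+// calls the block's update trampoline once the counter has gone negative,
+// i.e. when the cycles allotted to this run are used up. If the branch target
+// is one of the registered idle_loop_target_pc entries, the block calls
+// mips_update_gba unconditionally instead, so the emulator services events
+// rather than spinning through the idle loop at full speed.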
633+#define generate_branch_cycle_update(writeback_location, new_pc) \
634+ generate_cycle_update(); \
635+ generate_branch_no_cycle_update(writeback_location, new_pc) \
636+
637+#define generate_conditional_branch(ireg_a, ireg_b, type, writeback_location) \
638+ generate_branch_filler_##type(ireg_a, ireg_b, writeback_location) \
639+
640+// a0 holds the destination
641+
642+#define generate_indirect_branch_cycle_update(type) \
643+ mips_emit_j(mips_absolute_offset(mips_indirect_branch_##type)); \
644+ generate_cycle_update_force() \
645+
646+#define generate_indirect_branch_no_cycle_update(type) \
647+ mips_emit_j(mips_absolute_offset(mips_indirect_branch_##type)); \
648+ mips_emit_nop() \
649+
650+#define generate_block_prologue() \
651+ update_trampoline = translation_ptr; \
652+ __asm__ \
653+ ( \
654+ "cache 8, 0(%0)\n" \
655+ "cache 8, 0(%0)" : : "r"(translation_ptr) \
656+ ); \
657+ \
658+ mips_emit_j(mips_absolute_offset(mips_update_gba)); \
659+ mips_emit_nop(); \
660+ generate_load_imm(reg_pc, stored_pc) \
661+
662+#define translate_invalidate_dcache() \
663+ sceKernelDcacheWritebackAll() \
664+
665+#define block_prologue_size 8
666+
667+#define check_generate_n_flag \
668+ (flag_status & 0x08) \
669+
670+#define check_generate_z_flag \
671+ (flag_status & 0x04) \
672+
673+#define check_generate_c_flag \
674+ (flag_status & 0x02) \
675+
676+#define check_generate_v_flag \
677+ (flag_status & 0x01) \
678+
679+#define generate_load_reg_pc(ireg, reg_index, pc_offset) \
680+ if(reg_index == REG_PC) \
681+ { \
682+ generate_load_pc(ireg, (pc + pc_offset)); \
683+ } \
684+ else \
685+ { \
686+ generate_load_reg(ireg, reg_index); \
687+ } \
688+
689+#define check_load_reg_pc(arm_reg, reg_index, pc_offset) \
690+ if(reg_index == REG_PC) \
691+ { \
692+ reg_index = arm_reg; \
693+ generate_load_pc(arm_to_mips_reg[arm_reg], (pc + pc_offset)); \
694+ } \
695+
696+#define check_store_reg_pc_no_flags(reg_index) \
697+ if(reg_index == REG_PC) \
698+ { \
699+ generate_indirect_branch_arm(); \
700+ } \
701+
702+#define check_store_reg_pc_flags(reg_index) \
703+ if(reg_index == REG_PC) \
704+ { \
705+ generate_function_call(execute_spsr_restore); \
706+ generate_indirect_branch_dual(); \
707+ } \
708+
709+#define generate_shift_imm_lsl_no_flags(arm_reg, _rm, _shift) \
710+ check_load_reg_pc(arm_reg, _rm, 8); \
711+ if(_shift != 0) \
712+ { \
713+ mips_emit_sll(arm_to_mips_reg[arm_reg], arm_to_mips_reg[_rm], _shift); \
714+ _rm = arm_reg; \
715+ } \
716+
717+#define generate_shift_imm_lsr_no_flags(arm_reg, _rm, _shift) \
718+ if(_shift != 0) \
719+ { \
720+ check_load_reg_pc(arm_reg, _rm, 8); \
721+ mips_emit_srl(arm_to_mips_reg[arm_reg], arm_to_mips_reg[_rm], _shift); \
722+ } \
723+ else \
724+ { \
725+ mips_emit_addu(arm_to_mips_reg[arm_reg], reg_zero, reg_zero); \
726+ } \
727+ _rm = arm_reg \
728+
729+#define generate_shift_imm_asr_no_flags(arm_reg, _rm, _shift) \
730+ check_load_reg_pc(arm_reg, _rm, 8); \
731+ if(_shift != 0) \
732+ { \
733+ mips_emit_sra(arm_to_mips_reg[arm_reg], arm_to_mips_reg[_rm], _shift); \
734+ } \
735+ else \
736+ { \
737+ mips_emit_sra(arm_to_mips_reg[arm_reg], arm_to_mips_reg[_rm], 31); \
738+ } \
739+ _rm = arm_reg \
740+
741+#define generate_shift_imm_ror_no_flags(arm_reg, _rm, _shift) \
742+ check_load_reg_pc(arm_reg, _rm, 8); \
743+ if(_shift != 0) \
744+ { \
745+ mips_emit_rotr(arm_to_mips_reg[arm_reg], arm_to_mips_reg[_rm], _shift); \
746+ } \
747+ else \
748+ { \
749+ mips_emit_srl(arm_to_mips_reg[arm_reg], arm_to_mips_reg[_rm], 1); \
750+ mips_emit_ins(arm_to_mips_reg[arm_reg], reg_c_cache, 31, 1); \
751+ } \
752+ _rm = arm_reg \
753+
754+#define generate_shift_imm_lsl_flags(arm_reg, _rm, _shift) \
755+ check_load_reg_pc(arm_reg, _rm, 8); \
756+ if(_shift != 0) \
757+ { \
758+ mips_emit_ext(reg_c_cache, arm_to_mips_reg[_rm], (32 - _shift), 1); \
759+ mips_emit_sll(arm_to_mips_reg[arm_reg], arm_to_mips_reg[_rm], _shift); \
760+ _rm = arm_reg; \
761+ } \
762+
763+#define generate_shift_imm_lsr_flags(arm_reg, _rm, _shift) \
764+ check_load_reg_pc(arm_reg, _rm, 8); \
765+ if(_shift != 0) \
766+ { \
767+ mips_emit_ext(reg_c_cache, arm_to_mips_reg[_rm], (_shift - 1), 1); \
768+ mips_emit_srl(arm_to_mips_reg[arm_reg], arm_to_mips_reg[_rm], _shift); \
769+ } \
770+ else \
771+ { \
772+ mips_emit_srl(reg_c_cache, arm_to_mips_reg[_rm], 31); \
773+ mips_emit_addu(arm_to_mips_reg[arm_reg], reg_zero, reg_zero); \
774+ } \
775+ _rm = arm_reg \
776+
777+#define generate_shift_imm_asr_flags(arm_reg, _rm, _shift) \
778+ check_load_reg_pc(arm_reg, _rm, 8); \
779+ if(_shift != 0) \
780+ { \
781+ mips_emit_ext(reg_c_cache, arm_to_mips_reg[_rm], (_shift - 1), 1); \
782+ mips_emit_sra(arm_to_mips_reg[arm_reg], arm_to_mips_reg[_rm], _shift); \
783+ } \
784+ else \
785+ { \
786+ mips_emit_sra(arm_to_mips_reg[arm_reg], arm_to_mips_reg[_rm], 31); \
787+ mips_emit_andi(reg_c_cache, arm_to_mips_reg[arm_reg], 1); \
788+ } \
789+ _rm = arm_reg \
790+
791+#define generate_shift_imm_ror_flags(arm_reg, _rm, _shift) \
792+ check_load_reg_pc(arm_reg, _rm, 8); \
793+ if(_shift != 0) \
794+ { \
795+ mips_emit_ext(reg_c_cache, arm_to_mips_reg[_rm], (_shift - 1), 1); \
796+ mips_emit_rotr(arm_to_mips_reg[arm_reg], arm_to_mips_reg[_rm], _shift); \
797+ } \
798+ else \
799+ { \
800+ mips_emit_andi(reg_temp, arm_to_mips_reg[_rm], 1); \
801+ mips_emit_srl(arm_to_mips_reg[arm_reg], arm_to_mips_reg[_rm], 1); \
802+ mips_emit_ins(arm_to_mips_reg[arm_reg], reg_c_cache, 31, 1); \
803+ mips_emit_addu(reg_c_cache, reg_temp, reg_zero); \
804+ } \
805+ _rm = arm_reg \
806+
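+// The _shift == 0 paths above are the ARM special cases: LSR/ASR #0 encode a
+// shift by 32, and ROR #0 encodes RRX, i.e. result = (C << 31) | (Rm >> 1)
+// with the new carry taken from bit 0 of Rm; that is the srl/ins (plus, in
+// the flags variant, andi) sequence emitted above.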
807+#define generate_shift_reg_lsl_no_flags(_rm, _rs) \
808+ mips_emit_sltiu(reg_temp, arm_to_mips_reg[_rs], 32); \
809+ mips_emit_sllv(reg_a0, arm_to_mips_reg[_rm], arm_to_mips_reg[_rs]); \
810+ mips_emit_movz(reg_a0, reg_zero, reg_temp) \
811+
812+#define generate_shift_reg_lsr_no_flags(_rm, _rs) \
813+ mips_emit_sltiu(reg_temp, arm_to_mips_reg[_rs], 32); \
814+ mips_emit_srlv(reg_a0, arm_to_mips_reg[_rm], arm_to_mips_reg[_rs]); \
815+ mips_emit_movz(reg_a0, reg_zero, reg_temp) \
816+
817+#define generate_shift_reg_asr_no_flags(_rm, _rs) \
818+ mips_emit_sltiu(reg_temp, arm_to_mips_reg[_rs], 32); \
819+ mips_emit_b(bne, reg_temp, reg_zero, 2); \
820+ mips_emit_srav(reg_a0, arm_to_mips_reg[_rm], arm_to_mips_reg[_rs]); \
821+ mips_emit_sra(reg_a0, reg_a0, 31) \
822+
823+#define generate_shift_reg_ror_no_flags(_rm, _rs) \
824+ mips_emit_rotrv(reg_a0, arm_to_mips_reg[_rm], arm_to_mips_reg[_rs]) \
825+
826+#define generate_shift_reg_lsl_flags(_rm, _rs) \
827+ generate_load_reg_pc(reg_a0, _rm, 12); \
828+ generate_load_reg_pc(reg_a1, _rs, 8); \
829+ generate_function_call_swap_delay(execute_lsl_flags_reg) \
830+
831+#define generate_shift_reg_lsr_flags(_rm, _rs) \
832+ generate_load_reg_pc(reg_a0, _rm, 12); \
833+ generate_load_reg_pc(reg_a1, _rs, 8); \
834+ generate_function_call_swap_delay(execute_lsr_flags_reg) \
835+
836+#define generate_shift_reg_asr_flags(_rm, _rs) \
837+ generate_load_reg_pc(reg_a0, _rm, 12); \
838+ generate_load_reg_pc(reg_a1, _rs, 8); \
839+ generate_function_call_swap_delay(execute_asr_flags_reg) \
840+
841+#define generate_shift_reg_ror_flags(_rm, _rs) \
842+ mips_emit_b(beq, arm_to_mips_reg[_rs], reg_zero, 3); \
843+ mips_emit_addiu(reg_temp, arm_to_mips_reg[_rs], -1); \
844+ mips_emit_srlv(reg_temp, arm_to_mips_reg[_rm], reg_temp); \
845+ mips_emit_andi(reg_c_cache, reg_temp, 1); \
846+ mips_emit_rotrv(reg_a0, arm_to_mips_reg[_rm], arm_to_mips_reg[_rs]) \
847+
848+#define generate_shift_imm(arm_reg, name, flags_op) \
849+ u32 shift = ((opcode >> 7) & 0x1F); \
850+ generate_shift_imm_##name##_##flags_op(arm_reg, rm, shift) \
851+
852+#define generate_shift_reg(arm_reg, name, flags_op) \
853+ u32 rs = ((opcode >> 8) & 0x0F); \
854+ generate_shift_reg_##name##_##flags_op(rm, rs); \
855+ rm = arm_reg \
856+
857+// These were made functions because the macro expansion was getting too
858+// large. They return a new rm if they redirect it (which will happen in
859+// most of these cases).
860+
861+#define generate_load_rm_sh_builder(flags_op) \
862+u32 generate_load_rm_sh_##flags_op(u32 rm) \
863+{ \
864+ switch((opcode >> 4) & 0x07) \
865+ { \
866+ /* LSL imm */ \
867+ case 0x0: \
868+ { \
869+ generate_shift_imm(arm_reg_a0, lsl, flags_op); \
870+ break; \
871+ } \
872+ \
873+ /* LSL reg */ \
874+ case 0x1: \
875+ { \
876+ generate_shift_reg(arm_reg_a0, lsl, flags_op); \
877+ break; \
878+ } \
879+ \
880+ /* LSR imm */ \
881+ case 0x2: \
882+ { \
883+ generate_shift_imm(arm_reg_a0, lsr, flags_op); \
884+ break; \
885+ } \
886+ \
887+ /* LSR reg */ \
888+ case 0x3: \
889+ { \
890+ generate_shift_reg(arm_reg_a0, lsr, flags_op); \
891+ break; \
892+ } \
893+ \
894+ /* ASR imm */ \
895+ case 0x4: \
896+ { \
897+ generate_shift_imm(arm_reg_a0, asr, flags_op); \
898+ break; \
899+ } \
900+ \
901+ /* ASR reg */ \
902+ case 0x5: \
903+ { \
904+ generate_shift_reg(arm_reg_a0, asr, flags_op); \
905+ break; \
906+ } \
907+ \
908+ /* ROR imm */ \
909+ case 0x6: \
910+ { \
911+ generate_shift_imm(arm_reg_a0, ror, flags_op); \
912+ break; \
913+ } \
914+ \
915+ /* ROR reg */ \
916+ case 0x7: \
917+ { \
918+ generate_shift_reg(arm_reg_a0, ror, flags_op); \
919+ break; \
920+ } \
921+ } \
922+ \
923+ return rm; \
924+} \
925+
926+#define read_memory_constant_u8(address) \
927+ read_memory8(address) \
928+
929+#define read_memory_constant_u16(address) \
930+ read_memory16(address) \
931+
932+#define read_memory_constant_u32(address) \
933+ read_memory32(address) \
934+
935+#define read_memory_constant_s8(address) \
936+ (s8)read_memory8(address) \
937+
938+#define read_memory_constant_s16(address) \
939+ (s16)read_memory16_signed(address) \
940+
941+#define generate_load_memory_u8(ireg, offset) \
942+ mips_emit_lbu(ireg, ireg, offset) \
943+
944+#define generate_load_memory_u16(ireg, offset) \
945+ mips_emit_lhu(ireg, ireg, offset) \
946+
947+#define generate_load_memory_u32(ireg, offset) \
948+ mips_emit_lw(ireg, ireg, offset) \
949+
950+#define generate_load_memory_s8(ireg, offset) \
951+ mips_emit_lb(ireg, ireg, offset) \
952+
953+#define generate_load_memory_s16(ireg, offset) \
954+ mips_emit_lh(ireg, ireg, offset) \
955+
956+#define generate_load_memory(type, ireg, address) \
957+{ \
958+ u32 _address = (u32)(address); \
959+ u32 _address_hi = (_address + 0x8000) >> 16; \
960+  /* lui holds the rounded upper half; the 16-bit load offset covers the rest */ \
961+  mips_emit_lui(ireg, _address_hi); \
962+ generate_load_memory_##type(ireg, _address - (_address_hi << 16)); \
963+} \
964+
965+#define generate_known_address_load_builder(type) \
966+ u32 generate_known_address_load_##type(u32 rd, u32 address) \
967+ { \
968+ switch(address >> 24) \
969+ { \
970+ /* Read from the BIOS ROM, can be converted to an immediate load. \
971+ Only really possible to do this from the BIOS but should be okay \
972+ to allow it everywhere */ \
973+      case 0x00: { \
974+        u32 imm = read_memory_constant_##type(address); \
975+        generate_load_imm(arm_to_mips_reg[rd], imm); \
976+        return 1; } \
977+ \
978+ /* Read from RAM, can be converted to a load */ \
979+ case 0x02: \
980+ generate_load_memory(type, arm_to_mips_reg[rd], (u8 *)ewram + \
981+ (address & 0x7FFF) + ((address & 0x38000) * 2) + 0x8000); \
982+ return 1; \
983+ \
984+ case 0x03: \
985+ generate_load_memory(type, arm_to_mips_reg[rd], (u8 *)iwram + \
986+ (address & 0x7FFF) + 0x8000); \
987+ return 1; \
988+ \
989+ /* Read from gamepak ROM, this has to be an immediate load because \
990+ it might not actually be in memory anymore when we get to it. */ \
991+      case 0x08: { \
992+        u32 imm = read_memory_constant_##type(address); \
993+        generate_load_imm(arm_to_mips_reg[rd], imm); \
994+        return 1; } \
995+ \
996+ default: \
997+ return 0; \
998+ } \
999+ } \
1000+
1001+#define generate_block_extra_vars() \
1002+ u32 stored_pc = pc; \
1003+ u8 *update_trampoline \
1004+
1005+#define generate_block_extra_vars_arm() \
1006+ generate_block_extra_vars(); \
1007+ generate_load_rm_sh_builder(flags); \
1008+ generate_load_rm_sh_builder(no_flags); \
1009+ \
1010+/* generate_known_address_load_builder(u8); \
1011+ generate_known_address_load_builder(u16); \
1012+ generate_known_address_load_builder(u32); \
1013+ generate_known_address_load_builder(s8); \
1014+ generate_known_address_load_builder(s16); */ \
1015+ \
1016+ u32 generate_load_offset_sh(u32 rm) \
1017+ { \
1018+ switch((opcode >> 5) & 0x03) \
1019+ { \
1020+ /* LSL imm */ \
1021+ case 0x0: \
1022+ { \
1023+ generate_shift_imm(arm_reg_a1, lsl, no_flags); \
1024+ break; \
1025+ } \
1026+ \
1027+ /* LSR imm */ \
1028+ case 0x1: \
1029+ { \
1030+ generate_shift_imm(arm_reg_a1, lsr, no_flags); \
1031+ break; \
1032+ } \
1033+ \
1034+ /* ASR imm */ \
1035+ case 0x2: \
1036+ { \
1037+ generate_shift_imm(arm_reg_a1, asr, no_flags); \
1038+ break; \
1039+ } \
1040+ \
1041+ /* ROR imm */ \
1042+ case 0x3: \
1043+ { \
1044+ generate_shift_imm(arm_reg_a1, ror, no_flags); \
1045+ break; \
1046+ } \
1047+ } \
1048+ \
1049+ return rm; \
1050+ } \
1051+ \
1052+ void generate_indirect_branch_arm() \
1053+ { \
1054+ if(condition == 0x0E) \
1055+ { \
1056+ generate_indirect_branch_cycle_update(arm); \
1057+ } \
1058+ else \
1059+ { \
1060+ generate_indirect_branch_no_cycle_update(arm); \
1061+ } \
1062+ } \
1063+ \
1064+ void generate_indirect_branch_dual() \
1065+ { \
1066+ if(condition == 0x0E) \
1067+ { \
1068+ generate_indirect_branch_cycle_update(dual); \
1069+ } \
1070+ else \
1071+ { \
1072+ generate_indirect_branch_no_cycle_update(dual); \
1073+ } \
1074+ } \
1075+
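+// Worth noting (as far as this file shows): generate_block_extra_vars_arm()
+// expands inside the ARM translator function, so generate_load_rm_sh_flags/
+// no_flags, generate_load_offset_sh and the two indirect-branch helpers end
+// up as GCC nested functions that pick up pc, opcode, condition and
+// translation_ptr from the enclosing block being translated.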
1076+#define generate_block_extra_vars_thumb() \
1077+ generate_block_extra_vars() \
1078+
1079+// It should be okay to still generate result flags; the SPSR restore will
1080+// overwrite them anyway. This is pretty infrequent (returning from interrupt
1081+// handlers and the like), so it is probably not worth optimizing for.
1082+
1083+u32 execute_spsr_restore_body(u32 address)
1084+{
1085+ set_cpu_mode(cpu_modes[reg[REG_CPSR] & 0x1F]);
1086+
1087+ if((io_registers[REG_IE] & io_registers[REG_IF]) &&
1088+ io_registers[REG_IME] && ((reg[REG_CPSR] & 0x80) == 0))
1089+ {
1090+ reg_mode[MODE_IRQ][6] = address + 4;
1091+ spsr[MODE_IRQ] = reg[REG_CPSR];
1092+ reg[REG_CPSR] = (reg[REG_CPSR] & ~0xFF) | 0xD2;
1093+ set_cpu_mode(MODE_IRQ);
1094+
1095+ address = 0x00000018;
1096+ }
1097+
1098+ if(reg[REG_CPSR] & 0x20)
1099+ {
1100+ address |= 0x01;
1101+ }
1102+
1103+ return address;
1104+}
1105+
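+// In short: after the mode switch the function checks whether an interrupt is
+// both requested (IF) and enabled (IE/IME) and not masked by the freshly
+// restored CPSR; if so it banks the return address and CPSR for IRQ mode,
+// masks IRQs, and returns the 0x00000018 vector instead of the original
+// address. Bit 0 of the returned address carries the Thumb state so the
+// caller can dispatch to the right translator.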
1106+typedef enum
1107+{
1108+ CONDITION_TRUE,
1109+ CONDITION_FALSE,
1110+ CONDITION_EQUAL,
1111+ CONDITION_NOT_EQUAL
1112+} CONDITION_CHECK_TYPE;
1113+
1114+
1115+#define generate_condition_eq() \
1116+ mips_emit_b_filler(beq, reg_z_cache, reg_zero, backpatch_address); \
1117+ generate_cycle_update_force() \
1118+
1119+#define generate_condition_ne() \
1120+ mips_emit_b_filler(bne, reg_z_cache, reg_zero, backpatch_address); \
1121+ generate_cycle_update_force() \
1122+
1123+#define generate_condition_cs() \
1124+ mips_emit_b_filler(beq, reg_c_cache, reg_zero, backpatch_address); \
1125+ generate_cycle_update_force() \
1126+
1127+#define generate_condition_cc() \
1128+ mips_emit_b_filler(bne, reg_c_cache, reg_zero, backpatch_address); \
1129+ generate_cycle_update_force() \
1130+
1131+#define generate_condition_mi() \
1132+ mips_emit_b_filler(beq, reg_n_cache, reg_zero, backpatch_address); \
1133+ generate_cycle_update_force() \
1134+
1135+#define generate_condition_pl() \
1136+ mips_emit_b_filler(bne, reg_n_cache, reg_zero, backpatch_address); \
1137+ generate_cycle_update_force() \
1138+
1139+#define generate_condition_vs() \
1140+ mips_emit_b_filler(beq, reg_v_cache, reg_zero, backpatch_address); \
1141+ generate_cycle_update_force() \
1142+
1143+#define generate_condition_vc() \
1144+ mips_emit_b_filler(bne, reg_v_cache, reg_zero, backpatch_address); \
1145+ generate_cycle_update_force() \
1146+
1147+#define generate_condition_hi() \
1148+ mips_emit_xori(reg_temp, reg_c_cache, 1); \
1149+ mips_emit_or(reg_temp, reg_temp, reg_z_cache); \
1150+ mips_emit_b_filler(bne, reg_temp, reg_zero, backpatch_address); \
1151+ generate_cycle_update_force() \
1152+
1153+#define generate_condition_ls() \
1154+ mips_emit_xori(reg_temp, reg_c_cache, 1); \
1155+ mips_emit_or(reg_temp, reg_temp, reg_z_cache); \
1156+ mips_emit_b_filler(beq, reg_temp, reg_zero, backpatch_address); \
1157+ generate_cycle_update_force() \
1158+
1159+#define generate_condition_ge() \
1160+ mips_emit_b_filler(bne, reg_n_cache, reg_v_cache, backpatch_address); \
1161+ generate_cycle_update_force() \
1162+
1163+#define generate_condition_lt() \
1164+ mips_emit_b_filler(beq, reg_n_cache, reg_v_cache, backpatch_address); \
1165+ generate_cycle_update_force() \
1166+
1167+#define generate_condition_gt() \
1168+ mips_emit_xor(reg_temp, reg_n_cache, reg_v_cache); \
1169+ mips_emit_or(reg_temp, reg_temp, reg_z_cache); \
1170+ mips_emit_b_filler(bne, reg_temp, reg_zero, backpatch_address); \
1171+ generate_cycle_update_force() \
1172+
1173+#define generate_condition_le() \
1174+ mips_emit_xor(reg_temp, reg_n_cache, reg_v_cache); \
1175+ mips_emit_or(reg_temp, reg_temp, reg_z_cache); \
1176+ mips_emit_b_filler(beq, reg_temp, reg_zero, backpatch_address); \
1177+ generate_cycle_update_force() \
1178+
1179+#define generate_condition() \
1180+ switch(condition) \
1181+ { \
1182+ case 0x0: \
1183+ generate_condition_eq(); \
1184+ break; \
1185+ \
1186+ case 0x1: \
1187+ generate_condition_ne(); \
1188+ break; \
1189+ \
1190+ case 0x2: \
1191+ generate_condition_cs(); \
1192+ break; \
1193+ \
1194+ case 0x3: \
1195+ generate_condition_cc(); \
1196+ break; \
1197+ \
1198+ case 0x4: \
1199+ generate_condition_mi(); \
1200+ break; \
1201+ \
1202+ case 0x5: \
1203+ generate_condition_pl(); \
1204+ break; \
1205+ \
1206+ case 0x6: \
1207+ generate_condition_vs(); \
1208+ break; \
1209+ \
1210+ case 0x7: \
1211+ generate_condition_vc(); \
1212+ break; \
1213+ \
1214+ case 0x8: \
1215+ generate_condition_hi(); \
1216+ break; \
1217+ \
1218+ case 0x9: \
1219+ generate_condition_ls(); \
1220+ break; \
1221+ \
1222+ case 0xA: \
1223+ generate_condition_ge(); \
1224+ break; \
1225+ \
1226+ case 0xB: \
1227+ generate_condition_lt(); \
1228+ break; \
1229+ \
1230+ case 0xC: \
1231+ generate_condition_gt(); \
1232+ break; \
1233+ \
1234+ case 0xD: \
1235+ generate_condition_le(); \
1236+ break; \
1237+ \
1238+ case 0xE: \
1239+ break; \
1240+ \
1241+ case 0xF: \
1242+ break; \
1243+ } \
1244+
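+// The branches emitted above are inverted: they are taken when the ARM
+// condition fails, skipping forward to the backpatched address past the
+// conditional instruction. The addiu from generate_cycle_update_force() lands
+// in the branch delay slot, so the cycle counter is charged whether or not
+// the instruction body runs. AL (0xE) falls straight through, and 0xF is
+// treated the same way here.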
1245+#define generate_branch() \
1246+{ \
1247+ if(condition == 0x0E) \
1248+ { \
1249+ generate_branch_cycle_update( \
1250+ block_exits[block_exit_position].branch_source, \
1251+ block_exits[block_exit_position].branch_target); \
1252+ } \
1253+ else \
1254+ { \
1255+ generate_branch_no_cycle_update( \
1256+ block_exits[block_exit_position].branch_source, \
1257+ block_exits[block_exit_position].branch_target); \
1258+ } \
1259+ block_exit_position++; \
1260+} \
1261+
1262+#define generate_op_and_reg(_rd, _rn, _rm) \
1263+ mips_emit_and(_rd, _rn, _rm) \
1264+
1265+#define generate_op_orr_reg(_rd, _rn, _rm) \
1266+ mips_emit_or(_rd, _rn, _rm) \
1267+
1268+#define generate_op_eor_reg(_rd, _rn, _rm) \
1269+ mips_emit_xor(_rd, _rn, _rm) \
1270+
1271+#define generate_op_bic_reg(_rd, _rn, _rm) \
1272+ mips_emit_nor(reg_temp, _rm, reg_zero); \
1273+ mips_emit_and(_rd, _rn, reg_temp) \
1274+
1275+#define generate_op_sub_reg(_rd, _rn, _rm) \
1276+ mips_emit_subu(_rd, _rn, _rm) \
1277+
1278+#define generate_op_rsb_reg(_rd, _rn, _rm) \
1279+ mips_emit_subu(_rd, _rm, _rn) \
1280+
1281+/* SBC Rd = Rn - <Oprnd> - NOT(Carry) */
1282+#define generate_op_sbc_reg(_rd, _rn, _rm) \
1283+ mips_emit_subu(_rd, _rn, _rm); \
1284+ mips_emit_xori(reg_temp, reg_c_cache, 1); \
1285+ mips_emit_subu(_rd, _rd, reg_temp) \
1286+
1287+/* RSC Rd = <Oprnd> - Rn - NOT(Carry) */
1288+#define generate_op_rsc_reg(_rd, _rn, _rm) \
1289+ mips_emit_subu(_rd, _rm, _rn); \
1290+ mips_emit_xori(reg_temp, reg_c_cache, 1); \
1291+ mips_emit_subu(_rd, _rd, reg_temp) \
1292+
1293+#define generate_op_add_reg(_rd, _rn, _rm) \
1294+ mips_emit_addu(_rd, _rn, _rm) \
1295+
1296+#define generate_op_adc_reg(_rd, _rn, _rm) \
1297+ mips_emit_addu(reg_temp, _rm, reg_c_cache); \
1298+ mips_emit_addu(_rd, _rn, reg_temp) \
1299+
1300+#define generate_op_mov_reg(_rd, _rn, _rm) \
1301+ mips_emit_addu(_rd, _rm, reg_zero) \
1302+
1303+#define generate_op_mvn_reg(_rd, _rn, _rm) \
1304+ mips_emit_nor(_rd, _rm, reg_zero) \
1305+
1306+#define generate_op_imm_wrapper(name, _rd, _rn) \
1307+ if(imm != 0) \
1308+ { \
1309+ generate_load_imm(reg_a0, imm); \
1310+ generate_op_##name##_reg(_rd, _rn, reg_a0); \
1311+ } \
1312+ else \
1313+ { \
1314+ generate_op_##name##_reg(_rd, _rn, reg_zero); \
1315+ } \
1316+
1317+#define generate_op_and_imm(_rd, _rn) \
1318+ generate_alu_immu(andi, and, _rd, _rn, imm) \
1319+
1320+#define generate_op_orr_imm(_rd, _rn) \
1321+ generate_alu_immu(ori, or, _rd, _rn, imm) \
1322+
1323+#define generate_op_eor_imm(_rd, _rn) \
1324+ generate_alu_immu(xori, xor, _rd, _rn, imm) \
1325+
1326+#define generate_op_bic_imm(_rd, _rn) \
1327+ generate_alu_immu(andi, and, _rd, _rn, (~imm)) \
1328+
1329+#define generate_op_sub_imm(_rd, _rn) \
1330+ generate_alu_imm(addiu, addu, _rd, _rn, (-imm)) \
1331+
1332+#define generate_op_rsb_imm(_rd, _rn) \
1333+ if(imm != 0) \
1334+ { \
1335+ generate_load_imm(reg_temp, imm); \
1336+ mips_emit_subu(_rd, reg_temp, _rn); \
1337+ } \
1338+ else \
1339+ { \
1340+ mips_emit_subu(_rd, reg_zero, _rn); \
1341+ } \
1342+
1343+#define generate_op_sbc_imm(_rd, _rn) \
1344+ generate_op_imm_wrapper(sbc, _rd, _rn) \
1345+
1346+#define generate_op_rsc_imm(_rd, _rn) \
1347+ generate_op_imm_wrapper(rsc, _rd, _rn) \
1348+
1349+#define generate_op_add_imm(_rd, _rn) \
1350+ generate_alu_imm(addiu, addu, _rd, _rn, imm) \
1351+
1352+#define generate_op_adc_imm(_rd, _rn) \
1353+ generate_op_imm_wrapper(adc, _rd, _rn) \
1354+
1355+#define generate_op_mov_imm(_rd, _rn) \
1356+ generate_load_imm(_rd, imm) \
1357+
1358+#define generate_op_mvn_imm(_rd, _rn) \
1359+ generate_load_imm(_rd, (~imm)) \
1360+
1361+#define generate_op_logic_flags(_rd) \
1362+ if(check_generate_n_flag) \
1363+ { \
1364+ mips_emit_srl(reg_n_cache, _rd, 31); \
1365+ } \
1366+ if(check_generate_z_flag) \
1367+ { \
1368+ mips_emit_sltiu(reg_z_cache, _rd, 1); \
1369+ } \
1370+
1371+#define generate_op_sub_flags_prologue(_rn, _rm) \
1372+ if(check_generate_c_flag) \
1373+ { \
1374+ mips_emit_sltu(reg_c_cache, _rn, _rm); \
1375+ mips_emit_xori(reg_c_cache, reg_c_cache, 1); \
1376+ } \
1377+ if(check_generate_v_flag) \
1378+ { \
1379+ mips_emit_slt(reg_v_cache, _rn, _rm); \
1380+ } \
1381+
1382+#define generate_op_sub_flags_epilogue(_rd) \
1383+ generate_op_logic_flags(_rd); \
1384+ if(check_generate_v_flag) \
1385+ { \
1386+ if(!check_generate_n_flag) \
1387+ { \
1388+ mips_emit_srl(reg_n_cache, _rd, 31); \
1389+ } \
1390+ mips_emit_xor(reg_v_cache, reg_v_cache, reg_n_cache); \
1391+ } \
1392+
1393+#define generate_add_flags_prologue(_rn, _rm) \
1394+ if(check_generate_c_flag | check_generate_v_flag) \
1395+ { \
1396+ mips_emit_addu(reg_c_cache, _rn, reg_zero); \
1397+ } \
1398+ if(check_generate_v_flag) \
1399+ { \
1400+ mips_emit_slt(reg_v_cache, _rm, reg_zero); \
1401+ } \
1402+
1403+#define generate_add_flags_epilogue(_rd) \
1404+ if(check_generate_v_flag) \
1405+ { \
1406+ mips_emit_slt(reg_a0, _rd, reg_c_cache); \
1407+ mips_emit_xor(reg_v_cache, reg_v_cache, reg_a0); \
1408+ } \
1409+ if(check_generate_c_flag) \
1410+ { \
1411+ mips_emit_sltu(reg_c_cache, _rd, reg_c_cache); \
1412+ } \
1413+ generate_op_logic_flags(_rd) \
1414+
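+// The flag math above leans on two standard two's-complement identities
+// (stated here for reference): for result = Rn + Op2, carry out is
+// (result <u Rn) and signed overflow is ((Op2 <s 0) != (result <s Rn));
+// for result = Rn - Op2, ARM carry (no borrow) is !(Rn <u Op2) and signed
+// overflow is ((Rn <s Op2) != (result <s 0)). Example: 0x7FFFFFFF + 1 gives
+// 0x80000000; Op2 is non-negative but result <s Rn, so V is set.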
1415+#define generate_op_ands_reg(_rd, _rn, _rm) \
1416+ mips_emit_and(_rd, _rn, _rm); \
1417+ generate_op_logic_flags(_rd) \
1418+
1419+#define generate_op_orrs_reg(_rd, _rn, _rm) \
1420+ mips_emit_or(_rd, _rn, _rm); \
1421+ generate_op_logic_flags(_rd) \
1422+
1423+#define generate_op_eors_reg(_rd, _rn, _rm) \
1424+ mips_emit_xor(_rd, _rn, _rm); \
1425+ generate_op_logic_flags(_rd) \
1426+
1427+#define generate_op_bics_reg(_rd, _rn, _rm) \
1428+ mips_emit_nor(reg_temp, _rm, reg_zero); \
1429+ mips_emit_and(_rd, _rn, reg_temp); \
1430+ generate_op_logic_flags(_rd) \
1431+
1432+#define generate_op_subs_reg(_rd, _rn, _rm) \
1433+ generate_op_sub_flags_prologue(_rn, _rm); \
1434+ mips_emit_subu(_rd, _rn, _rm); \
1435+ generate_op_sub_flags_epilogue(_rd) \
1436+
1437+#define generate_op_rsbs_reg(_rd, _rn, _rm) \
1438+ generate_op_sub_flags_prologue(_rm, _rn); \
1439+ mips_emit_subu(_rd, _rm, _rn); \
1440+ generate_op_sub_flags_epilogue(_rd) \
1441+
1442+/* SBCS Rd = Rn - <Oprnd> - NOT(Carry) */
1443+#define generate_op_sbcs_reg(_rd, _rn, _rm) \
1444+ mips_emit_xori(reg_temp, reg_c_cache, 1); \
1445+ mips_emit_addu(reg_temp, _rm, reg_temp); \
1446+ generate_op_sub_flags_prologue(_rn, reg_temp); \
1447+ mips_emit_subu(_rd, _rn, reg_temp); \
1448+ generate_op_sub_flags_epilogue(_rd) \
1449+
1450+/* RSCS Rd = <Oprnd> - Rn - NOT(Carry) */
1451+#define generate_op_rscs_reg(_rd, _rn, _rm) \
1452+ mips_emit_xori(reg_temp, reg_c_cache, 1); \
1453+ mips_emit_addu(reg_temp, _rn, reg_temp); \
1454+ generate_op_sub_flags_prologue(_rm, reg_temp); \
1455+ mips_emit_subu(_rd, _rm, reg_temp); \
1456+ generate_op_sub_flags_epilogue(_rd) \
1457+
1458+#define generate_op_adds_reg(_rd, _rn, _rm) \
1459+ generate_add_flags_prologue(_rn, _rm); \
1460+ mips_emit_addu(_rd, _rn, _rm); \
1461+ generate_add_flags_epilogue(_rd) \
1462+
1463+#define generate_op_adcs_reg(_rd, _rn, _rm) \
1464+ mips_emit_addu(reg_temp, _rm, reg_c_cache); \
1465+ generate_add_flags_prologue(_rn, reg_temp); \
1466+ mips_emit_addu(_rd, _rn, reg_temp); \
1467+ generate_add_flags_epilogue(_rd) \
1468+
1469+#define generate_op_movs_reg(_rd, _rn, _rm) \
1470+ mips_emit_addu(_rd, _rm, reg_zero); \
1471+ generate_op_logic_flags(_rd) \
1472+
1473+#define generate_op_mvns_reg(_rd, _rn, _rm) \
1474+ mips_emit_nor(_rd, _rm, reg_zero); \
1475+ generate_op_logic_flags(_rd) \
1476+
1477+#define generate_op_neg_reg(_rd, _rn, _rm) \
1478+ generate_op_subs_reg(_rd, reg_zero, _rm) \
1479+
1480+#define generate_op_muls_reg(_rd, _rn, _rm) \
1481+ mips_emit_multu(_rn, _rm); \
1482+ mips_emit_mflo(_rd); \
1483+ generate_op_logic_flags(_rd) \
1484+
1485+#define generate_op_cmp_reg(_rd, _rn, _rm) \
1486+ generate_op_subs_reg(reg_temp, _rn, _rm) \
1487+
1488+#define generate_op_cmn_reg(_rd, _rn, _rm) \
1489+ generate_op_adds_reg(reg_temp, _rn, _rm) \
1490+
1491+#define generate_op_tst_reg(_rd, _rn, _rm) \
1492+ generate_op_ands_reg(reg_temp, _rn, _rm) \
1493+
1494+#define generate_op_teq_reg(_rd, _rn, _rm) \
1495+ generate_op_eors_reg(reg_temp, _rn, _rm) \
1496+
1497+#define generate_op_ands_imm(_rd, _rn) \
1498+ generate_alu_immu(andi, and, _rd, _rn, imm); \
1499+ generate_op_logic_flags(_rd) \
1500+
1501+#define generate_op_orrs_imm(_rd, _rn) \
1502+ generate_alu_immu(ori, or, _rd, _rn, imm); \
1503+ generate_op_logic_flags(_rd) \
1504+
1505+#define generate_op_eors_imm(_rd, _rn) \
1506+ generate_alu_immu(xori, xor, _rd, _rn, imm); \
1507+ generate_op_logic_flags(_rd) \
1508+
1509+#define generate_op_bics_imm(_rd, _rn) \
1510+ generate_alu_immu(andi, and, _rd, _rn, (~imm)); \
1511+ generate_op_logic_flags(_rd) \
1512+
1513+#define generate_op_subs_imm(_rd, _rn) \
1514+ generate_op_imm_wrapper(subs, _rd, _rn) \
1515+
1516+#define generate_op_rsbs_imm(_rd, _rn) \
1517+ generate_op_imm_wrapper(rsbs, _rd, _rn) \
1518+
1519+#define generate_op_sbcs_imm(_rd, _rn) \
1520+ generate_op_imm_wrapper(sbcs, _rd, _rn) \
1521+
1522+#define generate_op_rscs_imm(_rd, _rn) \
1523+ generate_op_imm_wrapper(rscs, _rd, _rn) \
1524+
1525+#define generate_op_adds_imm(_rd, _rn) \
1526+ generate_op_imm_wrapper(adds, _rd, _rn) \
1527+
1528+#define generate_op_adcs_imm(_rd, _rn) \
1529+ generate_op_imm_wrapper(adcs, _rd, _rn) \
1530+
1531+#define generate_op_movs_imm(_rd, _rn) \
1532+ generate_load_imm(_rd, imm); \
1533+ generate_op_logic_flags(_rd) \
1534+
1535+#define generate_op_mvns_imm(_rd, _rn) \
1536+ generate_load_imm(_rd, (~imm)); \
1537+ generate_op_logic_flags(_rd) \
1538+
1539+#define generate_op_cmp_imm(_rd, _rn) \
1540+ generate_op_imm_wrapper(cmp, _rd, _rn) \
1541+
1542+#define generate_op_cmn_imm(_rd, _rn) \
1543+ generate_op_imm_wrapper(cmn, _rd, _rn) \
1544+
1545+#define generate_op_tst_imm(_rd, _rn) \
1546+ generate_op_ands_imm(reg_temp, _rn) \
1547+
1548+#define generate_op_teq_imm(_rd, _rn) \
1549+ generate_op_eors_imm(reg_temp, _rn) \
1550+
1551+#define arm_generate_op_load_yes() \
1552+ generate_load_reg_pc(reg_a1, rn, 8) \
1553+
1554+#define arm_generate_op_load_no() \
1555+
1556+#define arm_op_check_yes() \
1557+ check_load_reg_pc(arm_reg_a1, rn, 8) \
1558+
1559+#define arm_op_check_no() \
1560+
1561+#define arm_generate_op_reg_flags(name, load_op) \
1562+ arm_decode_data_proc_reg(); \
1563+ if(check_generate_c_flag) \
1564+ { \
1565+ rm = generate_load_rm_sh_flags(rm); \
1566+ } \
1567+ else \
1568+ { \
1569+ rm = generate_load_rm_sh_no_flags(rm); \
1570+ } \
1571+ arm_op_check_##load_op(); \
1572+ generate_op_##name##_reg(arm_to_mips_reg[rd], arm_to_mips_reg[rn], \
1573+ arm_to_mips_reg[rm]) \
1574+
1575+#define arm_generate_op_reg(name, load_op) \
1576+ arm_decode_data_proc_reg(); \
1577+ rm = generate_load_rm_sh_no_flags(rm); \
1578+ arm_op_check_##load_op(); \
1579+ generate_op_##name##_reg(arm_to_mips_reg[rd], arm_to_mips_reg[rn], \
1580+ arm_to_mips_reg[rm]) \
1581+
1582+#define arm_generate_op_imm(name, load_op) \
1583+ arm_decode_data_proc_imm(); \
1584+ arm_op_check_##load_op(); \
1585+ generate_op_##name##_imm(arm_to_mips_reg[rd], arm_to_mips_reg[rn]) \
1586+
1587+#define arm_generate_op_imm_flags(name, load_op) \
1588+ arm_generate_op_imm(name, load_op) \
1589+
1590+
1591+/* 1S+x+y */
1592+/* x=1I cycles if Op2 shifted-by-register. y=1S+1N cycles if Rd=R15. */
1593+#define cycle_arm_data_proc_reg() \
1594+  if(rd == REG_PC) \
1595+ { \
1596+ cycle_count += 3 + cycle_arm_base(pc); \
1597+ } \
1598+ else \
1599+ { \
1600+ cycle_count += 1 + cycle_arm_fetch(pc); \
1601+ } \
1602+
1603+#define cycle_arm_data_proc_reg_flags() \
1604+ cycle_arm_data_proc_reg() \
1605+
1606+#define cycle_arm_data_proc_imm() \
1607+ cycle_count += cycle_arm_base(pc); \
1608+  if(rd == REG_PC) \
1609+ { \
1610+ cycle_count += 2; \
1611+ } \
1612+
1613+#define cycle_arm_data_proc_imm_flags() \
1614+ cycle_arm_data_proc_imm() \
1615+
1616+/* 1S+x */
1617+#define cycle_arm_data_proc_test_reg() \
1618+ cycle_count += 1 + cycle_arm_fetch(pc) \
1619+
1620+#define cycle_arm_data_proc_test_reg_flags() \
1621+ cycle_arm_data_proc_test_reg() \
1622+
1623+#define cycle_arm_data_proc_test_imm() \
1624+ cycle_count += cycle_arm_base(pc) \
1625+
1626+#define cycle_arm_data_proc_test_imm_flags() \
1627+ cycle_arm_data_proc_test_imm() \
1628+
1629+/* AND, EOR, SUB, RSB, ADD, ADC, SBC, RSC, ORR, BIC 1S+x+y */
1630+#define arm_data_proc(name, type, flags_op) \
1631+{ \
1632+ arm_generate_op_##type(name, yes); \
1633+ cycle_arm_data_proc_##type(); \
1634+ check_store_reg_pc_##flags_op(rd); \
1635+} \
1636+
1637+/* TST, TEQ, CMP, CMN 1S+x */
1638+#define arm_data_proc_test(name, type) \
1639+{ \
1640+ arm_generate_op_##type(name, yes); \
1641+ cycle_arm_data_proc_test_##type(); \
1642+} \
1643+
1644+/* MOV, MVN 1S+x+y */
1645+#define arm_data_proc_unary(name, type, flags_op) \
1646+{ \
1647+ arm_generate_op_##type(name, no); \
1648+ cycle_arm_data_proc_##type(); \
1649+ check_store_reg_pc_##flags_op(rd); \
1650+} \
1651+
1652+
1653+#define arm_multiply_flags_yes(_rd) \
1654+ generate_op_logic_flags(_rd) \
1655+
1656+#define arm_multiply_flags_no(_rd) \
1657+
1658+#define arm_multiply_add_no() \
1659+ mips_emit_mflo(arm_to_mips_reg[rd]) \
1660+
1661+#define arm_multiply_add_yes() \
1662+ mips_emit_mflo(reg_temp); \
1663+ mips_emit_addu(arm_to_mips_reg[rd], reg_temp, arm_to_mips_reg[rn]) \
1664+
1665+/* 1S+mI */
1666+#define cycle_arm_multiply_add_no() \
1667+ cycle_count += cycle_arm_fetch(pc) + cycle_multiply(rs) \
1668+
1669+/* 1S+mI+1I */
1670+#define cycle_arm_multiply_add_yes() \
1671+ cycle_count += 1 + cycle_arm_fetch(pc) + cycle_multiply(rs) \
1672+
1673+#define arm_multiply(add_op, flags) \
1674+{ \
1675+ arm_decode_multiply(); \
1676+ cycle_arm_multiply_add_##add_op(); \
1677+ mips_emit_multu(arm_to_mips_reg[rm], arm_to_mips_reg[rs]); \
1678+ arm_multiply_add_##add_op(); \
1679+ arm_multiply_flags_##flags(arm_to_mips_reg[rd]); \
1680+} \
1681+
1682+#define arm_multiply_long_flags_yes(_rdlo, _rdhi) \
1683+ mips_emit_sltiu(reg_z_cache, _rdlo, 1); \
1684+ mips_emit_sltiu(reg_a0, _rdhi, 1); \
1685+ mips_emit_and(reg_z_cache, reg_z_cache, reg_a0); \
1686+ mips_emit_srl(reg_n_cache, _rdhi, 31); \
1687+
1688+#define arm_multiply_long_flags_no(_rdlo, _rdhi) \
1689+
1690+#define arm_multiply_long_add_yes(name) \
1691+ mips_emit_mtlo(arm_to_mips_reg[rdlo]); \
1692+ mips_emit_mthi(arm_to_mips_reg[rdhi]); \
1693+ generate_multiply_##name() \
1694+
1695+#define arm_multiply_long_add_no(name) \
1696+ generate_multiply_##name() \
1697+
1698+/* 1S+mI+1I */
1699+#define cycle_arm_multiply_long_add_no() \
1700+ cycle_count += 1 + cycle_arm_fetch(pc) + cycle_multiply(rs) \
1701+
1702+/* 1S+mI+2I */
1703+#define cycle_arm_multiply_long_add_yes() \
1704+ cycle_count += 2 + cycle_arm_fetch(pc) + cycle_multiply(rs) \
1705+
1706+#define arm_multiply_long(name, add_op, flags) \
1707+{ \
1708+ arm_decode_multiply_long(); \
1709+ cycle_arm_multiply_long_add_##add_op(); \
1710+ arm_multiply_long_add_##add_op(name); \
1711+ mips_emit_mflo(arm_to_mips_reg[rdlo]); \
1712+ mips_emit_mfhi(arm_to_mips_reg[rdhi]); \
1713+ arm_multiply_long_flags_##flags(arm_to_mips_reg[rdlo], \
1714+ arm_to_mips_reg[rdhi]); \
1715+} \
1716+
1717+#define arm_psr_read(op_type, psr_reg) \
1718+ generate_function_call(execute_read_##psr_reg); \
1719+ generate_store_reg(reg_rv, rd) \
1720+
1721+u32 execute_store_cpsr_body(u32 _cpsr, u32 store_mask, u32 address)
1722+{
1723+ reg[REG_CPSR] = _cpsr;
1724+ if(store_mask & 0xFF)
1725+ {
1726+ set_cpu_mode(cpu_modes[_cpsr & 0x1F]);
1727+
1728+ if((io_registers[REG_IE] & io_registers[REG_IF]) &&
1729+ io_registers[REG_IME] && ((_cpsr & 0x80) == 0))
1730+ {
1731+ reg_mode[MODE_IRQ][6] = address + 4;
1732+ spsr[MODE_IRQ] = _cpsr;
1733+ reg[REG_CPSR] = (reg[REG_CPSR] & ~0xFF) | 0xD2;
1734+ set_cpu_mode(MODE_IRQ);
1735+
1736+ return 0x00000018;
1737+ }
1738+ }
1739+
1740+ return 0;
1741+}
1742+
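+// The return value looks like a branch-target convention: zero means keep
+// going, while a non-zero return is the address the caller should jump to
+// (here always the 0x00000018 IRQ vector, with the old state banked into the
+// IRQ-mode registers when writing the control byte unmasks a pending,
+// enabled interrupt).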
1743+#define arm_psr_load_new_reg() \
1744+ generate_load_reg(reg_a0, rm) \
1745+
1746+#define arm_psr_load_new_imm() \
1747+ generate_load_imm(reg_a0, imm) \
1748+
1749+#define arm_psr_store(op_type, psr_reg) \
1750+ arm_psr_load_new_##op_type(); \
1751+ generate_load_imm(reg_a1, psr_masks[psr_field]); \
1752+ generate_load_pc(reg_a2, (pc + 4)); \
1753+ generate_function_call_swap_delay(execute_store_##psr_reg) \
1754+
1755+#define arm_psr(op_type, transfer_type, psr_reg) \
1756+{ \
1757+ arm_decode_psr_##op_type(); \
1758+ cycle_count += cycle_arm_base(pc); \
1759+ arm_psr_##transfer_type(op_type, psr_reg); \
1760+} \
1761+
1762+/* LDR: 1S+1N+1I. LDR PC: 2S+2N+1I. STR: 2N. */
1763+#define cycle_arm_access_memory_load_u32() \
1764+ cycle_count += \
1765+/* 1 + cycle_arm_fetch(pc) + waitstate_cycles_non_seq[1][reg_a0 >> 24]; */ \
1766+ cycle_arm_fetch(pc) + waitstate_cycles_non_seq[1][reg_a0 >> 24]; \
1768+  if(rd == REG_PC) \
1768+ { \
1769+ cycle_count += 2; \
1770+ } \
1771+
1772+#define cycle_arm_access_memory_load_u16() \
1773+ cycle_count += \
1774+/* 1 + cycle_arm_fetch(pc) + waitstate_cycles_non_seq[0][reg_a0 >> 24]; */ \
1775+ cycle_arm_fetch(pc) + waitstate_cycles_non_seq[0][reg_a0 >> 24]; \
1777+  if(rd == REG_PC) \
1777+ { \
1778+ cycle_count += 2; \
1779+ } \
1780+
1781+#define cycle_arm_access_memory_load_s16() \
1782+ cycle_arm_access_memory_load_u16() \
1783+
1784+#define cycle_arm_access_memory_load_u8() \
1785+ cycle_arm_access_memory_load_u16() \
1786+
1787+#define cycle_arm_access_memory_load_s8() \
1788+ cycle_arm_access_memory_load_u16() \
1789+
1790+#define cycle_arm_access_memory_store_u32() \
1791+ cycle_count += \
1792+ cycle_arm_fetch(pc) + waitstate_cycles_non_seq[1][reg_a0 >> 24] \
1793+
1794+#define cycle_arm_access_memory_store_u16() \
1795+ cycle_count += \
1796+ cycle_arm_fetch(pc) + waitstate_cycles_non_seq[0][reg_a0 >> 24] \
1797+
1798+#define cycle_arm_access_memory_store_s16() \
1799+ cycle_arm_access_memory_store_u16() \
1800+
1801+#define cycle_arm_access_memory_store_u8() \
1802+ cycle_arm_access_memory_store_u16() \
1803+
1804+#define cycle_arm_access_memory_store_s8() \
1805+ cycle_arm_access_memory_store_u16() \
1806+
1807+#define arm_access_memory_load(mem_type) \
1808+ cycle_arm_access_memory_load_##mem_type(); \
1809+ mips_emit_jal(mips_absolute_offset(execute_load_##mem_type)); \
1810+ generate_load_pc(reg_a1, (pc + 8)); \
1811+ generate_store_reg(reg_rv, rd); \
1812+ check_store_reg_pc_no_flags(rd) \
1813+
1814+#define arm_access_memory_store(mem_type) \
1815+ cycle_arm_access_memory_store_##mem_type(); \
1816+ generate_load_pc(reg_a2, (pc + 4)); \
1817+ generate_load_reg_pc(reg_a1, rd, 12); \
1818+ generate_function_call_swap_delay(execute_store_##mem_type) \
1819+
1820+#define arm_access_memory_reg_pre_up() \
1821+ mips_emit_addu(reg_a0, arm_to_mips_reg[rn], arm_to_mips_reg[rm]) \
1822+
1823+#define arm_access_memory_reg_pre_down() \
1824+ mips_emit_subu(reg_a0, arm_to_mips_reg[rn], arm_to_mips_reg[rm]) \
1825+
1826+#define arm_access_memory_reg_pre(adjust_dir) \
1827+ check_load_reg_pc(arm_reg_a0, rn, 8); \
1828+ arm_access_memory_reg_pre_##adjust_dir() \
1829+
1830+#define arm_access_memory_reg_pre_wb(adjust_dir) \
1831+ arm_access_memory_reg_pre(adjust_dir); \
1832+ generate_store_reg(reg_a0, rn) \
1833+
1834+#define arm_access_memory_reg_post_up() \
1835+ mips_emit_addu(arm_to_mips_reg[rn], arm_to_mips_reg[rn], \
1836+ arm_to_mips_reg[rm]) \
1837+
1838+#define arm_access_memory_reg_post_down() \
1839+ mips_emit_subu(arm_to_mips_reg[rn], arm_to_mips_reg[rn], \
1840+ arm_to_mips_reg[rm]) \
1841+
1842+#define arm_access_memory_reg_post(adjust_dir) \
1843+ generate_load_reg(reg_a0, rn); \
1844+ arm_access_memory_reg_post_##adjust_dir() \
1845+
1846+#define arm_access_memory_imm_pre_up() \
1847+ mips_emit_addiu(reg_a0, arm_to_mips_reg[rn], offset) \
1848+
1849+#define arm_access_memory_imm_pre_down() \
1850+ mips_emit_addiu(reg_a0, arm_to_mips_reg[rn], -offset) \
1851+
1852+#define arm_access_memory_imm_pre(adjust_dir) \
1853+ check_load_reg_pc(arm_reg_a0, rn, 8); \
1854+ arm_access_memory_imm_pre_##adjust_dir() \
1855+
1856+#define arm_access_memory_imm_pre_wb(adjust_dir) \
1857+ arm_access_memory_imm_pre(adjust_dir); \
1858+ generate_store_reg(reg_a0, rn) \
1859+
1860+#define arm_access_memory_imm_post_up() \
1861+ mips_emit_addiu(arm_to_mips_reg[rn], arm_to_mips_reg[rn], offset) \
1862+
1863+#define arm_access_memory_imm_post_down() \
1864+ mips_emit_addiu(arm_to_mips_reg[rn], arm_to_mips_reg[rn], -offset) \
1865+
1866+#define arm_access_memory_imm_post(adjust_dir) \
1867+ generate_load_reg(reg_a0, rn); \
1868+ arm_access_memory_imm_post_##adjust_dir() \
1869+
1870+#define arm_data_trans_reg(adjust_op, adjust_dir) \
1871+ arm_decode_data_trans_reg(); \
1872+ rm = generate_load_offset_sh(rm); \
1873+ arm_access_memory_reg_##adjust_op(adjust_dir) \
1874+
1875+#define arm_data_trans_imm(adjust_op, adjust_dir) \
1876+ arm_decode_data_trans_imm(); \
1877+ arm_access_memory_imm_##adjust_op(adjust_dir) \
1878+
1879+#define arm_data_trans_half_reg(adjust_op, adjust_dir) \
1880